1diff -urNp linux-2.6.32.48/arch/alpha/include/asm/elf.h linux-2.6.32.48/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.48/arch/alpha/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
3+++ linux-2.6.32.48/arch/alpha/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
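
Illustrative sketch (editorial aside, not part of the patch): the PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values added above are bit counts, not byte offsets; PaX scales a random value of that many bits by the page size to shift the mmap or stack base. A minimal user-space sketch of that scaling, assuming Alpha's 8 KiB pages (PAGE_SHIFT 13) and a caller-supplied random value:

    #include <stdio.h>

    #define PAGE_SHIFT 13   /* Alpha uses 8 KiB pages */

    /* Scale the 'len' low-order bits of a random value into a page-aligned offset. */
    static unsigned long delta_from_bits(unsigned long rnd, unsigned int len)
    {
        return (rnd & ((1UL << len) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
        /* 28 bits of mmap randomization for 64-bit personalities (see above) */
        printf("max mmap shift, 64-bit: %#lx bytes\n", delta_from_bits(~0UL, 28));
        /* 14 bits for ADDR_LIMIT_32BIT personalities */
        printf("max mmap shift, 32-bit: %#lx bytes\n", delta_from_bits(~0UL, 14));
        return 0;
    }
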
18diff -urNp linux-2.6.32.48/arch/alpha/include/asm/pgtable.h linux-2.6.32.48/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.48/arch/alpha/include/asm/pgtable.h 2011-11-08 19:02:43.000000000 -0500
20+++ linux-2.6.32.48/arch/alpha/include/asm/pgtable.h 2011-11-15 19:59:42.000000000 -0500
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.48/arch/alpha/kernel/module.c linux-2.6.32.48/arch/alpha/kernel/module.c
40--- linux-2.6.32.48/arch/alpha/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
41+++ linux-2.6.32.48/arch/alpha/kernel/module.c 2011-11-15 19:59:42.000000000 -0500
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.48/arch/alpha/kernel/osf_sys.c linux-2.6.32.48/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.48/arch/alpha/kernel/osf_sys.c 2011-11-08 19:02:43.000000000 -0500
53+++ linux-2.6.32.48/arch/alpha/kernel/osf_sys.c 2011-11-15 19:59:42.000000000 -0500
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.32.48/arch/alpha/mm/fault.c linux-2.6.32.48/arch/alpha/mm/fault.c
86--- linux-2.6.32.48/arch/alpha/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
87+++ linux-2.6.32.48/arch/alpha/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.32.48/arch/arm/include/asm/elf.h linux-2.6.32.48/arch/arm/include/asm/elf.h
245--- linux-2.6.32.48/arch/arm/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
246+++ linux-2.6.32.48/arch/arm/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.48/arch/arm/include/asm/kmap_types.h linux-2.6.32.48/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.48/arch/arm/include/asm/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
265+++ linux-2.6.32.48/arch/arm/include/asm/kmap_types.h 2011-11-15 19:59:42.000000000 -0500
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.48/arch/arm/include/asm/uaccess.h linux-2.6.32.48/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.48/arch/arm/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
276+++ linux-2.6.32.48/arch/arm/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
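
Illustrative sketch (editorial aside, not part of the patch): the (long)n < 0 guards added to copy_from_user()/copy_to_user() above reject copy lengths with the sign bit set, i.e. values produced when a negative signed count is converted to unsigned long; returning n reports every byte as uncopied. A self-contained user-space illustration of that guard:

    #include <stdio.h>

    /* Mirrors the guard added above; the real functions go on to call access_ok(). */
    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)    /* sign bit set: almost certainly a signed-length bug */
            return n;       /* report everything as "not copied" */
        /* ... the actual user copy would run here ... */
        return 0;
    }

    int main(void)
    {
        int broken_len = -1;    /* e.g. an error code accidentally used as a length */
        unsigned long n = (unsigned long)broken_len;
        printf("requested %#lx bytes, %#lx left uncopied\n", n, guarded_copy(n));
        return 0;
    }
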
332diff -urNp linux-2.6.32.48/arch/arm/kernel/armksyms.c linux-2.6.32.48/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.48/arch/arm/kernel/armksyms.c 2011-11-08 19:02:43.000000000 -0500
334+++ linux-2.6.32.48/arch/arm/kernel/armksyms.c 2011-11-15 19:59:42.000000000 -0500
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.48/arch/arm/kernel/kgdb.c linux-2.6.32.48/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.48/arch/arm/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
348+++ linux-2.6.32.48/arch/arm/kernel/kgdb.c 2011-11-15 19:59:42.000000000 -0500
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358diff -urNp linux-2.6.32.48/arch/arm/kernel/traps.c linux-2.6.32.48/arch/arm/kernel/traps.c
359--- linux-2.6.32.48/arch/arm/kernel/traps.c 2011-11-08 19:02:43.000000000 -0500
360+++ linux-2.6.32.48/arch/arm/kernel/traps.c 2011-11-15 19:59:42.000000000 -0500
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.48/arch/arm/lib/copy_from_user.S linux-2.6.32.48/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.48/arch/arm/lib/copy_from_user.S 2011-11-08 19:02:43.000000000 -0500
381+++ linux-2.6.32.48/arch/arm/lib/copy_from_user.S 2011-11-15 19:59:42.000000000 -0500
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.48/arch/arm/lib/copy_to_user.S linux-2.6.32.48/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.48/arch/arm/lib/copy_to_user.S 2011-11-08 19:02:43.000000000 -0500
407+++ linux-2.6.32.48/arch/arm/lib/copy_to_user.S 2011-11-15 19:59:42.000000000 -0500
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.48/arch/arm/lib/uaccess.S linux-2.6.32.48/arch/arm/lib/uaccess.S
432--- linux-2.6.32.48/arch/arm/lib/uaccess.S 2011-11-08 19:02:43.000000000 -0500
433+++ linux-2.6.32.48/arch/arm/lib/uaccess.S 2011-11-15 19:59:42.000000000 -0500
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.48/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.48/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.48/arch/arm/lib/uaccess_with_memcpy.c 2011-11-08 19:02:43.000000000 -0500
489+++ linux-2.6.32.48/arch/arm/lib/uaccess_with_memcpy.c 2011-11-15 19:59:42.000000000 -0500
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.48/arch/arm/mach-at91/pm.c linux-2.6.32.48/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.48/arch/arm/mach-at91/pm.c 2011-11-08 19:02:43.000000000 -0500
501+++ linux-2.6.32.48/arch/arm/mach-at91/pm.c 2011-11-15 19:59:42.000000000 -0500
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511diff -urNp linux-2.6.32.48/arch/arm/mach-omap1/pm.c linux-2.6.32.48/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.48/arch/arm/mach-omap1/pm.c 2011-11-08 19:02:43.000000000 -0500
513+++ linux-2.6.32.48/arch/arm/mach-omap1/pm.c 2011-11-15 19:59:42.000000000 -0500
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.48/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.48/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.48/arch/arm/mach-omap2/pm24xx.c 2011-11-08 19:02:43.000000000 -0500
525+++ linux-2.6.32.48/arch/arm/mach-omap2/pm24xx.c 2011-11-15 19:59:42.000000000 -0500
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.48/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.48/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.48/arch/arm/mach-omap2/pm34xx.c 2011-11-08 19:02:43.000000000 -0500
537+++ linux-2.6.32.48/arch/arm/mach-omap2/pm34xx.c 2011-11-15 19:59:42.000000000 -0500
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.48/arch/arm/mach-pnx4008/pm.c linux-2.6.32.48/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.48/arch/arm/mach-pnx4008/pm.c 2011-11-08 19:02:43.000000000 -0500
549+++ linux-2.6.32.48/arch/arm/mach-pnx4008/pm.c 2011-11-15 19:59:42.000000000 -0500
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.48/arch/arm/mach-pxa/pm.c linux-2.6.32.48/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.48/arch/arm/mach-pxa/pm.c 2011-11-08 19:02:43.000000000 -0500
561+++ linux-2.6.32.48/arch/arm/mach-pxa/pm.c 2011-11-15 19:59:42.000000000 -0500
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.48/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.48/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.48/arch/arm/mach-pxa/sharpsl_pm.c 2011-11-08 19:02:43.000000000 -0500
573+++ linux-2.6.32.48/arch/arm/mach-pxa/sharpsl_pm.c 2011-11-15 19:59:42.000000000 -0500
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.48/arch/arm/mach-sa1100/pm.c linux-2.6.32.48/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.48/arch/arm/mach-sa1100/pm.c 2011-11-08 19:02:43.000000000 -0500
585+++ linux-2.6.32.48/arch/arm/mach-sa1100/pm.c 2011-11-15 19:59:42.000000000 -0500
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.48/arch/arm/mm/fault.c linux-2.6.32.48/arch/arm/mm/fault.c
596--- linux-2.6.32.48/arch/arm/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
597+++ linux-2.6.32.48/arch/arm/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.48/arch/arm/mm/mmap.c linux-2.6.32.48/arch/arm/mm/mmap.c
647--- linux-2.6.32.48/arch/arm/mm/mmap.c 2011-11-08 19:02:43.000000000 -0500
648+++ linux-2.6.32.48/arch/arm/mm/mmap.c 2011-11-15 19:59:42.000000000 -0500
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
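
Illustrative sketch (editorial aside, not part of the patch): the check_heap_stack_gap() calls substituted above replace the bare "does the mapping end before the next vma" test. The helper itself is defined elsewhere in the full patch; the simplified user-space form below only conveys the intent, and the sysctl_heap_stack_gap name, its default, and the VM_GROWSDOWN handling are assumptions here.

    #include <stdbool.h>

    #define VM_GROWSDOWN 0x0100UL       /* stack-style vma, grows toward lower addresses */

    struct vm_area_struct {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
    };

    static unsigned long sysctl_heap_stack_gap = 64UL << 12;   /* assumed: 64 pages */

    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len)
    {
        if (!vma)
            return true;
        if (vma->vm_flags & VM_GROWSDOWN)   /* keep a guard gap below a stack vma */
            return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
        return addr + len <= vma->vm_start; /* old rule: simply no overlap */
    }

    int main(void)
    {
        struct vm_area_struct stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN };
        /* A request ending right below the stack vma is rejected; one a gap away is fine. */
        return !(!check_heap_stack_gap(&stack, stack.vm_start - 0x1000, 0x1000)
                 && check_heap_stack_gap(&stack, stack.vm_start - 0x100000, 0x1000));
    }
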
698diff -urNp linux-2.6.32.48/arch/arm/plat-s3c/pm.c linux-2.6.32.48/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.48/arch/arm/plat-s3c/pm.c 2011-11-08 19:02:43.000000000 -0500
700+++ linux-2.6.32.48/arch/arm/plat-s3c/pm.c 2011-11-15 19:59:42.000000000 -0500
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.48/arch/avr32/include/asm/elf.h linux-2.6.32.48/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.48/arch/avr32/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
712+++ linux-2.6.32.48/arch/avr32/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.48/arch/avr32/include/asm/kmap_types.h linux-2.6.32.48/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.48/arch/avr32/include/asm/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
731+++ linux-2.6.32.48/arch/avr32/include/asm/kmap_types.h 2011-11-15 19:59:42.000000000 -0500
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.48/arch/avr32/mach-at32ap/pm.c linux-2.6.32.48/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.48/arch/avr32/mach-at32ap/pm.c 2011-11-08 19:02:43.000000000 -0500
744+++ linux-2.6.32.48/arch/avr32/mach-at32ap/pm.c 2011-11-15 19:59:42.000000000 -0500
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.48/arch/avr32/mm/fault.c linux-2.6.32.48/arch/avr32/mm/fault.c
755--- linux-2.6.32.48/arch/avr32/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
756+++ linux-2.6.32.48/arch/avr32/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.48/arch/blackfin/kernel/kgdb.c linux-2.6.32.48/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.48/arch/blackfin/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
800+++ linux-2.6.32.48/arch/blackfin/kernel/kgdb.c 2011-11-15 19:59:42.000000000 -0500
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.48/arch/blackfin/mach-common/pm.c linux-2.6.32.48/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.48/arch/blackfin/mach-common/pm.c 2011-11-08 19:02:43.000000000 -0500
812+++ linux-2.6.32.48/arch/blackfin/mach-common/pm.c 2011-11-15 19:59:42.000000000 -0500
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.48/arch/frv/include/asm/kmap_types.h linux-2.6.32.48/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.48/arch/frv/include/asm/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
824+++ linux-2.6.32.48/arch/frv/include/asm/kmap_types.h 2011-11-15 19:59:42.000000000 -0500
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.48/arch/frv/mm/elf-fdpic.c linux-2.6.32.48/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.48/arch/frv/mm/elf-fdpic.c 2011-11-08 19:02:43.000000000 -0500
835+++ linux-2.6.32.48/arch/frv/mm/elf-fdpic.c 2011-11-15 19:59:42.000000000 -0500
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.48/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.48/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.48/arch/ia64/hp/common/hwsw_iommu.c 2011-11-08 19:02:43.000000000 -0500
866+++ linux-2.6.32.48/arch/ia64/hp/common/hwsw_iommu.c 2011-11-15 19:59:42.000000000 -0500
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.48/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.48/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.48/arch/ia64/hp/common/sba_iommu.c 2011-11-08 19:02:43.000000000 -0500
887+++ linux-2.6.32.48/arch/ia64/hp/common/sba_iommu.c 2011-11-15 19:59:42.000000000 -0500
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.48/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.48/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.48/arch/ia64/ia32/binfmt_elf32.c 2011-11-08 19:02:43.000000000 -0500
908+++ linux-2.6.32.48/arch/ia64/ia32/binfmt_elf32.c 2011-11-15 19:59:42.000000000 -0500
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.48/arch/ia64/ia32/ia32priv.h linux-2.6.32.48/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.48/arch/ia64/ia32/ia32priv.h 2011-11-08 19:02:43.000000000 -0500
925+++ linux-2.6.32.48/arch/ia64/ia32/ia32priv.h 2011-11-15 19:59:42.000000000 -0500
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.48/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.48/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.48/arch/ia64/include/asm/dma-mapping.h 2011-11-08 19:02:43.000000000 -0500
944+++ linux-2.6.32.48/arch/ia64/include/asm/dma-mapping.h 2011-11-15 19:59:42.000000000 -0500
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.48/arch/ia64/include/asm/elf.h linux-2.6.32.48/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.48/arch/ia64/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
990+++ linux-2.6.32.48/arch/ia64/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.48/arch/ia64/include/asm/machvec.h linux-2.6.32.48/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.48/arch/ia64/include/asm/machvec.h 2011-11-08 19:02:43.000000000 -0500
1007+++ linux-2.6.32.48/arch/ia64/include/asm/machvec.h 2011-11-15 19:59:42.000000000 -0500
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.48/arch/ia64/include/asm/pgtable.h linux-2.6.32.48/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.48/arch/ia64/include/asm/pgtable.h 2011-11-08 19:02:43.000000000 -0500
1028+++ linux-2.6.32.48/arch/ia64/include/asm/pgtable.h 2011-11-15 19:59:42.000000000 -0500
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.48/arch/ia64/include/asm/spinlock.h linux-2.6.32.48/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.48/arch/ia64/include/asm/spinlock.h 2011-11-08 19:02:43.000000000 -0500
1058+++ linux-2.6.32.48/arch/ia64/include/asm/spinlock.h 2011-11-15 19:59:42.000000000 -0500
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.48/arch/ia64/include/asm/uaccess.h linux-2.6.32.48/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.48/arch/ia64/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
1070+++ linux-2.6.32.48/arch/ia64/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.48/arch/ia64/kernel/dma-mapping.c linux-2.6.32.48/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.48/arch/ia64/kernel/dma-mapping.c 2011-11-08 19:02:43.000000000 -0500
1091+++ linux-2.6.32.48/arch/ia64/kernel/dma-mapping.c 2011-11-15 19:59:42.000000000 -0500
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.48/arch/ia64/kernel/module.c linux-2.6.32.48/arch/ia64/kernel/module.c
1111--- linux-2.6.32.48/arch/ia64/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
1112+++ linux-2.6.32.48/arch/ia64/kernel/module.c 2011-11-15 19:59:42.000000000 -0500
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
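
Illustrative sketch (editorial aside, not part of the patch): the in_init_rx()/in_core_rw() helpers added above all use the single-comparison range check "addr - base < size", which covers both bounds at once because an address below base wraps to a huge unsigned value. A user-space check of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    /* One unsigned compare tests base <= addr < base + size. */
    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               in_range(0x1000, 0x1000, 0x100),   /* 1: at base            */
               in_range(0x10ff, 0x1000, 0x100),   /* 1: last byte in range */
               in_range(0x0fff, 0x1000, 0x100));  /* 0: below base (wraps) */
        return 0;
    }
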
1201diff -urNp linux-2.6.32.48/arch/ia64/kernel/pci-dma.c linux-2.6.32.48/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.48/arch/ia64/kernel/pci-dma.c 2011-11-08 19:02:43.000000000 -0500
1203+++ linux-2.6.32.48/arch/ia64/kernel/pci-dma.c 2011-11-15 19:59:42.000000000 -0500
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255diff -urNp linux-2.6.32.48/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.48/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.48/arch/ia64/kernel/pci-swiotlb.c 2011-11-08 19:02:43.000000000 -0500
1257+++ linux-2.6.32.48/arch/ia64/kernel/pci-swiotlb.c 2011-11-15 19:59:42.000000000 -0500
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.48/arch/ia64/kernel/sys_ia64.c linux-2.6.32.48/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.48/arch/ia64/kernel/sys_ia64.c 2011-11-08 19:02:43.000000000 -0500
1269+++ linux-2.6.32.48/arch/ia64/kernel/sys_ia64.c 2011-11-15 19:59:42.000000000 -0500
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.48/arch/ia64/kernel/topology.c linux-2.6.32.48/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.48/arch/ia64/kernel/topology.c 2011-11-08 19:02:43.000000000 -0500
1304+++ linux-2.6.32.48/arch/ia64/kernel/topology.c 2011-11-15 19:59:42.000000000 -0500
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.48/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.48/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.48/arch/ia64/kernel/vmlinux.lds.S 2011-11-08 19:02:43.000000000 -0500
1316+++ linux-2.6.32.48/arch/ia64/kernel/vmlinux.lds.S 2011-11-15 19:59:42.000000000 -0500
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.48/arch/ia64/mm/fault.c linux-2.6.32.48/arch/ia64/mm/fault.c
1327--- linux-2.6.32.48/arch/ia64/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
1328+++ linux-2.6.32.48/arch/ia64/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.48/arch/ia64/mm/hugetlbpage.c linux-2.6.32.48/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.48/arch/ia64/mm/hugetlbpage.c 2011-11-08 19:02:43.000000000 -0500
1380+++ linux-2.6.32.48/arch/ia64/mm/hugetlbpage.c 2011-11-15 19:59:42.000000000 -0500
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.48/arch/ia64/mm/init.c linux-2.6.32.48/arch/ia64/mm/init.c
1391--- linux-2.6.32.48/arch/ia64/mm/init.c 2011-11-08 19:02:43.000000000 -0500
1392+++ linux-2.6.32.48/arch/ia64/mm/init.c 2011-11-15 19:59:42.000000000 -0500
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.48/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.48/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.48/arch/ia64/sn/pci/pci_dma.c 2011-11-08 19:02:43.000000000 -0500
1415+++ linux-2.6.32.48/arch/ia64/sn/pci/pci_dma.c 2011-11-15 19:59:42.000000000 -0500
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.48/arch/m32r/lib/usercopy.c linux-2.6.32.48/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.48/arch/m32r/lib/usercopy.c 2011-11-08 19:02:43.000000000 -0500
1427+++ linux-2.6.32.48/arch/m32r/lib/usercopy.c 2011-11-15 19:59:42.000000000 -0500
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
1448diff -urNp linux-2.6.32.48/arch/mips/alchemy/devboards/pm.c linux-2.6.32.48/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.48/arch/mips/alchemy/devboards/pm.c 2011-11-08 19:02:43.000000000 -0500
1450+++ linux-2.6.32.48/arch/mips/alchemy/devboards/pm.c 2011-11-15 19:59:42.000000000 -0500
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.48/arch/mips/include/asm/elf.h linux-2.6.32.48/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.48/arch/mips/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
1462+++ linux-2.6.32.48/arch/mips/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.48/arch/mips/include/asm/page.h linux-2.6.32.48/arch/mips/include/asm/page.h
1476--- linux-2.6.32.48/arch/mips/include/asm/page.h 2011-11-08 19:02:43.000000000 -0500
1477+++ linux-2.6.32.48/arch/mips/include/asm/page.h 2011-11-15 19:59:42.000000000 -0500
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.48/arch/mips/include/asm/reboot.h linux-2.6.32.48/arch/mips/include/asm/reboot.h
1488--- linux-2.6.32.48/arch/mips/include/asm/reboot.h 2011-11-08 19:02:43.000000000 -0500
1489+++ linux-2.6.32.48/arch/mips/include/asm/reboot.h 2011-11-15 19:59:42.000000000 -0500
1490@@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494-extern void (*_machine_restart)(char *command);
1495-extern void (*_machine_halt)(void);
1496+extern void (*__noreturn _machine_restart)(char *command);
1497+extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500diff -urNp linux-2.6.32.48/arch/mips/include/asm/system.h linux-2.6.32.48/arch/mips/include/asm/system.h
1501--- linux-2.6.32.48/arch/mips/include/asm/system.h 2011-11-08 19:02:43.000000000 -0500
1502+++ linux-2.6.32.48/arch/mips/include/asm/system.h 2011-11-15 19:59:42.000000000 -0500
1503@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507-extern unsigned long arch_align_stack(unsigned long sp);
1508+#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
1511diff -urNp linux-2.6.32.48/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.48/arch/mips/kernel/binfmt_elfn32.c
1512--- linux-2.6.32.48/arch/mips/kernel/binfmt_elfn32.c 2011-11-08 19:02:43.000000000 -0500
1513+++ linux-2.6.32.48/arch/mips/kernel/binfmt_elfn32.c 2011-11-15 19:59:42.000000000 -0500
1514@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518+#ifdef CONFIG_PAX_ASLR
1519+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520+
1521+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#endif
1524+
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528diff -urNp linux-2.6.32.48/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.48/arch/mips/kernel/binfmt_elfo32.c
1529--- linux-2.6.32.48/arch/mips/kernel/binfmt_elfo32.c 2011-11-08 19:02:43.000000000 -0500
1530+++ linux-2.6.32.48/arch/mips/kernel/binfmt_elfo32.c 2011-11-15 19:59:42.000000000 -0500
1531@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535+#ifdef CONFIG_PAX_ASLR
1536+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537+
1538+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540+#endif
1541+
1542 #include <asm/processor.h>
1543
1544 /*
1545diff -urNp linux-2.6.32.48/arch/mips/kernel/kgdb.c linux-2.6.32.48/arch/mips/kernel/kgdb.c
1546--- linux-2.6.32.48/arch/mips/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
1547+++ linux-2.6.32.48/arch/mips/kernel/kgdb.c 2011-11-15 19:59:42.000000000 -0500
1548@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552+/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556diff -urNp linux-2.6.32.48/arch/mips/kernel/process.c linux-2.6.32.48/arch/mips/kernel/process.c
1557--- linux-2.6.32.48/arch/mips/kernel/process.c 2011-11-08 19:02:43.000000000 -0500
1558+++ linux-2.6.32.48/arch/mips/kernel/process.c 2011-11-15 19:59:42.000000000 -0500
1559@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563-
1564-/*
1565- * Don't forget that the stack pointer must be aligned on a 8 bytes
1566- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567- */
1568-unsigned long arch_align_stack(unsigned long sp)
1569-{
1570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571- sp -= get_random_int() & ~PAGE_MASK;
1572-
1573- return sp & ALMASK;
1574-}
1575diff -urNp linux-2.6.32.48/arch/mips/kernel/reset.c linux-2.6.32.48/arch/mips/kernel/reset.c
1576--- linux-2.6.32.48/arch/mips/kernel/reset.c 2011-11-08 19:02:43.000000000 -0500
1577+++ linux-2.6.32.48/arch/mips/kernel/reset.c 2011-11-15 19:59:42.000000000 -0500
1578@@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582-void (*_machine_restart)(char *command);
1583-void (*_machine_halt)(void);
1584+void (*__noreturn _machine_restart)(char *command);
1585+void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589@@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593+ BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600+ BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607+ BUG();
1608 }
1609diff -urNp linux-2.6.32.48/arch/mips/kernel/syscall.c linux-2.6.32.48/arch/mips/kernel/syscall.c
1610--- linux-2.6.32.48/arch/mips/kernel/syscall.c 2011-11-08 19:02:43.000000000 -0500
1611+++ linux-2.6.32.48/arch/mips/kernel/syscall.c 2011-11-15 19:59:42.000000000 -0500
1612@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616+
1617+#ifdef CONFIG_PAX_RANDMMAP
1618+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619+#endif
1620+
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627- if (task_size - len >= addr &&
1628- (!vmm || addr + len <= vmm->vm_start))
1629+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632- addr = TASK_UNMAPPED_BASE;
1633+ addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641- if (!vmm || addr + len <= vmm->vm_start)
1642+ if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
1646diff -urNp linux-2.6.32.48/arch/mips/Makefile linux-2.6.32.48/arch/mips/Makefile
1647--- linux-2.6.32.48/arch/mips/Makefile 2011-11-08 19:02:43.000000000 -0500
1648+++ linux-2.6.32.48/arch/mips/Makefile 2011-11-15 19:59:42.000000000 -0500
1649@@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653+cflags-y += -Wno-sign-compare -Wno-extra
1654+
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658diff -urNp linux-2.6.32.48/arch/mips/mm/fault.c linux-2.6.32.48/arch/mips/mm/fault.c
1659--- linux-2.6.32.48/arch/mips/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
1660+++ linux-2.6.32.48/arch/mips/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
1661@@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665+#ifdef CONFIG_PAX_PAGEEXEC
1666+void pax_report_insns(void *pc, void *sp)
1667+{
1668+ unsigned long i;
1669+
1670+ printk(KERN_ERR "PAX: bytes at PC: ");
1671+ for (i = 0; i < 5; i++) {
1672+ unsigned int c;
1673+ if (get_user(c, (unsigned int *)pc+i))
1674+ printk(KERN_CONT "???????? ");
1675+ else
1676+ printk(KERN_CONT "%08x ", c);
1677+ }
1678+ printk("\n");
1679+}
1680+#endif
1681+
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685diff -urNp linux-2.6.32.48/arch/parisc/include/asm/elf.h linux-2.6.32.48/arch/parisc/include/asm/elf.h
1686--- linux-2.6.32.48/arch/parisc/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
1687+++ linux-2.6.32.48/arch/parisc/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
1688@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692+#ifdef CONFIG_PAX_ASLR
1693+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694+
1695+#define PAX_DELTA_MMAP_LEN 16
1696+#define PAX_DELTA_STACK_LEN 16
1697+#endif
1698+
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702diff -urNp linux-2.6.32.48/arch/parisc/include/asm/pgtable.h linux-2.6.32.48/arch/parisc/include/asm/pgtable.h
1703--- linux-2.6.32.48/arch/parisc/include/asm/pgtable.h 2011-11-08 19:02:43.000000000 -0500
1704+++ linux-2.6.32.48/arch/parisc/include/asm/pgtable.h 2011-11-15 19:59:42.000000000 -0500
1705@@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709+
1710+#ifdef CONFIG_PAX_PAGEEXEC
1711+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714+#else
1715+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716+# define PAGE_COPY_NOEXEC PAGE_COPY
1717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718+#endif
1719+
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723diff -urNp linux-2.6.32.48/arch/parisc/kernel/module.c linux-2.6.32.48/arch/parisc/kernel/module.c
1724--- linux-2.6.32.48/arch/parisc/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
1725+++ linux-2.6.32.48/arch/parisc/kernel/module.c 2011-11-15 19:59:42.000000000 -0500
1726@@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730+static inline int in_init_rx(struct module *me, void *loc)
1731+{
1732+ return (loc >= me->module_init_rx &&
1733+ loc < (me->module_init_rx + me->init_size_rx));
1734+}
1735+
1736+static inline int in_init_rw(struct module *me, void *loc)
1737+{
1738+ return (loc >= me->module_init_rw &&
1739+ loc < (me->module_init_rw + me->init_size_rw));
1740+}
1741+
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744- return (loc >= me->module_init &&
1745- loc <= (me->module_init + me->init_size));
1746+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1747+}
1748+
1749+static inline int in_core_rx(struct module *me, void *loc)
1750+{
1751+ return (loc >= me->module_core_rx &&
1752+ loc < (me->module_core_rx + me->core_size_rx));
1753+}
1754+
1755+static inline int in_core_rw(struct module *me, void *loc)
1756+{
1757+ return (loc >= me->module_core_rw &&
1758+ loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763- return (loc >= me->module_core &&
1764- loc <= (me->module_core + me->core_size));
1765+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773- me->core_size = ALIGN(me->core_size, 16);
1774- me->arch.got_offset = me->core_size;
1775- me->core_size += gots * sizeof(struct got_entry);
1776-
1777- me->core_size = ALIGN(me->core_size, 16);
1778- me->arch.fdesc_offset = me->core_size;
1779- me->core_size += fdescs * sizeof(Elf_Fdesc);
1780+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781+ me->arch.got_offset = me->core_size_rw;
1782+ me->core_size_rw += gots * sizeof(struct got_entry);
1783+
1784+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785+ me->arch.fdesc_offset = me->core_size_rw;
1786+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794- got = me->module_core + me->arch.got_offset;
1795+ got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826diff -urNp linux-2.6.32.48/arch/parisc/kernel/sys_parisc.c linux-2.6.32.48/arch/parisc/kernel/sys_parisc.c
1827--- linux-2.6.32.48/arch/parisc/kernel/sys_parisc.c 2011-11-08 19:02:43.000000000 -0500
1828+++ linux-2.6.32.48/arch/parisc/kernel/sys_parisc.c 2011-11-15 19:59:42.000000000 -0500
1829@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833- if (!vma || addr + len <= vma->vm_start)
1834+ if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842- if (!vma || addr + len <= vma->vm_start)
1843+ if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851- addr = TASK_UNMAPPED_BASE;
1852+ addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856diff -urNp linux-2.6.32.48/arch/parisc/kernel/traps.c linux-2.6.32.48/arch/parisc/kernel/traps.c
1857--- linux-2.6.32.48/arch/parisc/kernel/traps.c 2011-11-08 19:02:43.000000000 -0500
1858+++ linux-2.6.32.48/arch/parisc/kernel/traps.c 2011-11-15 19:59:42.000000000 -0500
1859@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863- if (vma && (regs->iaoq[0] >= vma->vm_start)
1864- && (vma->vm_flags & VM_EXEC)) {
1865-
1866+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870diff -urNp linux-2.6.32.48/arch/parisc/mm/fault.c linux-2.6.32.48/arch/parisc/mm/fault.c
1871--- linux-2.6.32.48/arch/parisc/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
1872+++ linux-2.6.32.48/arch/parisc/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
1873@@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877+#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885- if (code == 6 || code == 16)
1886+ if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894+#ifdef CONFIG_PAX_PAGEEXEC
1895+/*
1896+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897+ *
1898+ * returns 1 when task should be killed
1899+ * 2 when rt_sigreturn trampoline was detected
1900+ * 3 when unpatched PLT trampoline was detected
1901+ */
1902+static int pax_handle_fetch_fault(struct pt_regs *regs)
1903+{
1904+
1905+#ifdef CONFIG_PAX_EMUPLT
1906+ int err;
1907+
1908+ do { /* PaX: unpatched PLT emulation */
1909+ unsigned int bl, depwi;
1910+
1911+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913+
1914+ if (err)
1915+ break;
1916+
1917+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919+
1920+ err = get_user(ldw, (unsigned int *)addr);
1921+ err |= get_user(bv, (unsigned int *)(addr+4));
1922+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1923+
1924+ if (err)
1925+ break;
1926+
1927+ if (ldw == 0x0E801096U &&
1928+ bv == 0xEAC0C000U &&
1929+ ldw2 == 0x0E881095U)
1930+ {
1931+ unsigned int resolver, map;
1932+
1933+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935+ if (err)
1936+ break;
1937+
1938+ regs->gr[20] = instruction_pointer(regs)+8;
1939+ regs->gr[21] = map;
1940+ regs->gr[22] = resolver;
1941+ regs->iaoq[0] = resolver | 3UL;
1942+ regs->iaoq[1] = regs->iaoq[0] + 4;
1943+ return 3;
1944+ }
1945+ }
1946+ } while (0);
1947+#endif
1948+
1949+#ifdef CONFIG_PAX_EMUTRAMP
1950+
1951+#ifndef CONFIG_PAX_EMUSIGRT
1952+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953+ return 1;
1954+#endif
1955+
1956+ do { /* PaX: rt_sigreturn emulation */
1957+ unsigned int ldi1, ldi2, bel, nop;
1958+
1959+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968+ ldi2 == 0x3414015AU &&
1969+ bel == 0xE4008200U &&
1970+ nop == 0x08000240U)
1971+ {
1972+ regs->gr[25] = (ldi1 & 2) >> 1;
1973+ regs->gr[20] = __NR_rt_sigreturn;
1974+ regs->gr[31] = regs->iaoq[1] + 16;
1975+ regs->sr[0] = regs->iasq[1];
1976+ regs->iaoq[0] = 0x100UL;
1977+ regs->iaoq[1] = regs->iaoq[0] + 4;
1978+ regs->iasq[0] = regs->sr[2];
1979+ regs->iasq[1] = regs->sr[2];
1980+ return 2;
1981+ }
1982+ } while (0);
1983+#endif
1984+
1985+ return 1;
1986+}
1987+
1988+void pax_report_insns(void *pc, void *sp)
1989+{
1990+ unsigned long i;
1991+
1992+ printk(KERN_ERR "PAX: bytes at PC: ");
1993+ for (i = 0; i < 5; i++) {
1994+ unsigned int c;
1995+ if (get_user(c, (unsigned int *)pc+i))
1996+ printk(KERN_CONT "???????? ");
1997+ else
1998+ printk(KERN_CONT "%08x ", c);
1999+ }
2000+ printk("\n");
2001+}
2002+#endif
2003+
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007@@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011- if ((vma->vm_flags & acc_type) != acc_type)
2012+ if ((vma->vm_flags & acc_type) != acc_type) {
2013+
2014+#ifdef CONFIG_PAX_PAGEEXEC
2015+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016+ (address & ~3UL) == instruction_pointer(regs))
2017+ {
2018+ up_read(&mm->mmap_sem);
2019+ switch (pax_handle_fetch_fault(regs)) {
2020+
2021+#ifdef CONFIG_PAX_EMUPLT
2022+ case 3:
2023+ return;
2024+#endif
2025+
2026+#ifdef CONFIG_PAX_EMUTRAMP
2027+ case 2:
2028+ return;
2029+#endif
2030+
2031+ }
2032+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033+ do_group_exit(SIGKILL);
2034+ }
2035+#endif
2036+
2037 goto bad_area;
2038+ }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/device.h linux-2.6.32.48/arch/powerpc/include/asm/device.h
2043--- linux-2.6.32.48/arch/powerpc/include/asm/device.h 2011-11-08 19:02:43.000000000 -0500
2044+++ linux-2.6.32.48/arch/powerpc/include/asm/device.h 2011-11-15 19:59:42.000000000 -0500
2045@@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049- struct dma_map_ops *dma_ops;
2050+ const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.48/arch/powerpc/include/asm/dma-mapping.h
2055--- linux-2.6.32.48/arch/powerpc/include/asm/dma-mapping.h 2011-11-08 19:02:43.000000000 -0500
2056+++ linux-2.6.32.48/arch/powerpc/include/asm/dma-mapping.h 2011-11-15 19:59:42.000000000 -0500
2057@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061-extern struct dma_map_ops dma_direct_ops;
2062+extern const struct dma_map_ops dma_direct_ops;
2063
2064-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
2123diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/elf.h linux-2.6.32.48/arch/powerpc/include/asm/elf.h
2124--- linux-2.6.32.48/arch/powerpc/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
2125+++ linux-2.6.32.48/arch/powerpc/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
2126@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130-extern unsigned long randomize_et_dyn(unsigned long base);
2131-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132+#define ELF_ET_DYN_BASE (0x20000000)
2133+
2134+#ifdef CONFIG_PAX_ASLR
2135+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136+
2137+#ifdef __powerpc64__
2138+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140+#else
2141+#define PAX_DELTA_MMAP_LEN 15
2142+#define PAX_DELTA_STACK_LEN 15
2143+#endif
2144+#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153-#define arch_randomize_brk arch_randomize_brk
2154-
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/iommu.h linux-2.6.32.48/arch/powerpc/include/asm/iommu.h
2159--- linux-2.6.32.48/arch/powerpc/include/asm/iommu.h 2011-11-08 19:02:43.000000000 -0500
2160+++ linux-2.6.32.48/arch/powerpc/include/asm/iommu.h 2011-11-15 19:59:42.000000000 -0500
2161@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165+/* dma-iommu.c */
2166+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167+
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.48/arch/powerpc/include/asm/kmap_types.h
2172--- linux-2.6.32.48/arch/powerpc/include/asm/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
2173+++ linux-2.6.32.48/arch/powerpc/include/asm/kmap_types.h 2011-11-15 19:59:42.000000000 -0500
2174@@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178+ KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/page_64.h linux-2.6.32.48/arch/powerpc/include/asm/page_64.h
2183--- linux-2.6.32.48/arch/powerpc/include/asm/page_64.h 2011-11-08 19:02:43.000000000 -0500
2184+++ linux-2.6.32.48/arch/powerpc/include/asm/page_64.h 2011-11-15 19:59:42.000000000 -0500
2185@@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191+#define VM_STACK_DEFAULT_FLAGS32 \
2192+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198+#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202+#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/page.h linux-2.6.32.48/arch/powerpc/include/asm/page.h
2207--- linux-2.6.32.48/arch/powerpc/include/asm/page.h 2011-11-08 19:02:43.000000000 -0500
2208+++ linux-2.6.32.48/arch/powerpc/include/asm/page.h 2011-11-15 19:59:42.000000000 -0500
2209@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215+#define VM_DATA_DEFAULT_FLAGS32 \
2216+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225+#define ktla_ktva(addr) (addr)
2226+#define ktva_ktla(addr) (addr)
2227+
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
2231diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/pci.h linux-2.6.32.48/arch/powerpc/include/asm/pci.h
2232--- linux-2.6.32.48/arch/powerpc/include/asm/pci.h 2011-11-08 19:02:43.000000000 -0500
2233+++ linux-2.6.32.48/arch/powerpc/include/asm/pci.h 2011-11-15 19:59:42.000000000 -0500
2234@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239-extern struct dma_map_ops *get_pci_dma_ops(void);
2240+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241+extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/pgtable.h linux-2.6.32.48/arch/powerpc/include/asm/pgtable.h
2246--- linux-2.6.32.48/arch/powerpc/include/asm/pgtable.h 2011-11-08 19:02:43.000000000 -0500
2247+++ linux-2.6.32.48/arch/powerpc/include/asm/pgtable.h 2011-11-15 19:59:42.000000000 -0500
2248@@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252+#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.48/arch/powerpc/include/asm/pte-hash32.h
2257--- linux-2.6.32.48/arch/powerpc/include/asm/pte-hash32.h 2011-11-08 19:02:43.000000000 -0500
2258+++ linux-2.6.32.48/arch/powerpc/include/asm/pte-hash32.h 2011-11-15 19:59:42.000000000 -0500
2259@@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263+#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/ptrace.h linux-2.6.32.48/arch/powerpc/include/asm/ptrace.h
2268--- linux-2.6.32.48/arch/powerpc/include/asm/ptrace.h 2011-11-08 19:02:43.000000000 -0500
2269+++ linux-2.6.32.48/arch/powerpc/include/asm/ptrace.h 2011-11-15 19:59:42.000000000 -0500
2270@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/reg.h linux-2.6.32.48/arch/powerpc/include/asm/reg.h
2280--- linux-2.6.32.48/arch/powerpc/include/asm/reg.h 2011-11-08 19:02:43.000000000 -0500
2281+++ linux-2.6.32.48/arch/powerpc/include/asm/reg.h 2011-11-15 19:59:42.000000000 -0500
2282@@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.48/arch/powerpc/include/asm/swiotlb.h
2291--- linux-2.6.32.48/arch/powerpc/include/asm/swiotlb.h 2011-11-08 19:02:43.000000000 -0500
2292+++ linux-2.6.32.48/arch/powerpc/include/asm/swiotlb.h 2011-11-15 19:59:42.000000000 -0500
2293@@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297-extern struct dma_map_ops swiotlb_dma_ops;
2298+extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/system.h linux-2.6.32.48/arch/powerpc/include/asm/system.h
2303--- linux-2.6.32.48/arch/powerpc/include/asm/system.h 2011-11-08 19:02:43.000000000 -0500
2304+++ linux-2.6.32.48/arch/powerpc/include/asm/system.h 2011-11-15 19:59:42.000000000 -0500
2305@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309-extern unsigned long arch_align_stack(unsigned long sp);
2310+#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314diff -urNp linux-2.6.32.48/arch/powerpc/include/asm/uaccess.h linux-2.6.32.48/arch/powerpc/include/asm/uaccess.h
2315--- linux-2.6.32.48/arch/powerpc/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
2316+++ linux-2.6.32.48/arch/powerpc/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
2317@@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322+
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326@@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330-#ifndef __powerpc64__
2331-
2332-static inline unsigned long copy_from_user(void *to,
2333- const void __user *from, unsigned long n)
2334-{
2335- unsigned long over;
2336-
2337- if (access_ok(VERIFY_READ, from, n))
2338- return __copy_tofrom_user((__force void __user *)to, from, n);
2339- if ((unsigned long)from < TASK_SIZE) {
2340- over = (unsigned long)from + n - TASK_SIZE;
2341- return __copy_tofrom_user((__force void __user *)to, from,
2342- n - over) + over;
2343- }
2344- return n;
2345-}
2346-
2347-static inline unsigned long copy_to_user(void __user *to,
2348- const void *from, unsigned long n)
2349-{
2350- unsigned long over;
2351-
2352- if (access_ok(VERIFY_WRITE, to, n))
2353- return __copy_tofrom_user(to, (__force void __user *)from, n);
2354- if ((unsigned long)to < TASK_SIZE) {
2355- over = (unsigned long)to + n - TASK_SIZE;
2356- return __copy_tofrom_user(to, (__force void __user *)from,
2357- n - over) + over;
2358- }
2359- return n;
2360-}
2361-
2362-#else /* __powerpc64__ */
2363-
2364-#define __copy_in_user(to, from, size) \
2365- __copy_tofrom_user((to), (from), (size))
2366-
2367-extern unsigned long copy_from_user(void *to, const void __user *from,
2368- unsigned long n);
2369-extern unsigned long copy_to_user(void __user *to, const void *from,
2370- unsigned long n);
2371-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372- unsigned long n);
2373-
2374-#endif /* __powerpc64__ */
2375-
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383+
2384+ if (!__builtin_constant_p(n))
2385+ check_object_size(to, n, false);
2386+
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394+
2395+ if (!__builtin_constant_p(n))
2396+ check_object_size(from, n, true);
2397+
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405+#ifndef __powerpc64__
2406+
2407+static inline unsigned long __must_check copy_from_user(void *to,
2408+ const void __user *from, unsigned long n)
2409+{
2410+ unsigned long over;
2411+
2412+ if ((long)n < 0)
2413+ return n;
2414+
2415+ if (access_ok(VERIFY_READ, from, n)) {
2416+ if (!__builtin_constant_p(n))
2417+ check_object_size(to, n, false);
2418+ return __copy_tofrom_user((__force void __user *)to, from, n);
2419+ }
2420+ if ((unsigned long)from < TASK_SIZE) {
2421+ over = (unsigned long)from + n - TASK_SIZE;
2422+ if (!__builtin_constant_p(n - over))
2423+ check_object_size(to, n - over, false);
2424+ return __copy_tofrom_user((__force void __user *)to, from,
2425+ n - over) + over;
2426+ }
2427+ return n;
2428+}
2429+
2430+static inline unsigned long __must_check copy_to_user(void __user *to,
2431+ const void *from, unsigned long n)
2432+{
2433+ unsigned long over;
2434+
2435+ if ((long)n < 0)
2436+ return n;
2437+
2438+ if (access_ok(VERIFY_WRITE, to, n)) {
2439+ if (!__builtin_constant_p(n))
2440+ check_object_size(from, n, true);
2441+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2442+ }
2443+ if ((unsigned long)to < TASK_SIZE) {
2444+ over = (unsigned long)to + n - TASK_SIZE;
2445+ if (!__builtin_constant_p(n))
2446+ check_object_size(from, n - over, true);
2447+ return __copy_tofrom_user(to, (__force void __user *)from,
2448+ n - over) + over;
2449+ }
2450+ return n;
2451+}
2452+
2453+#else /* __powerpc64__ */
2454+
2455+#define __copy_in_user(to, from, size) \
2456+ __copy_tofrom_user((to), (from), (size))
2457+
2458+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459+{
2460+ if ((long)n < 0 || n > INT_MAX)
2461+ return n;
2462+
2463+ if (!__builtin_constant_p(n))
2464+ check_object_size(to, n, false);
2465+
2466+ if (likely(access_ok(VERIFY_READ, from, n)))
2467+ n = __copy_from_user(to, from, n);
2468+ else
2469+ memset(to, 0, n);
2470+ return n;
2471+}
2472+
2473+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474+{
2475+ if ((long)n < 0 || n > INT_MAX)
2476+ return n;
2477+
2478+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479+ if (!__builtin_constant_p(n))
2480+ check_object_size(from, n, true);
2481+ n = __copy_to_user(to, from, n);
2482+ }
2483+ return n;
2484+}
2485+
2486+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487+ unsigned long n);
2488+
2489+#endif /* __powerpc64__ */
2490+
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2494diff -urNp linux-2.6.32.48/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.48/arch/powerpc/kernel/cacheinfo.c
2495--- linux-2.6.32.48/arch/powerpc/kernel/cacheinfo.c 2011-11-08 19:02:43.000000000 -0500
2496+++ linux-2.6.32.48/arch/powerpc/kernel/cacheinfo.c 2011-11-15 19:59:42.000000000 -0500
2497@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501-static struct sysfs_ops cache_index_ops = {
2502+static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506diff -urNp linux-2.6.32.48/arch/powerpc/kernel/dma.c linux-2.6.32.48/arch/powerpc/kernel/dma.c
2507--- linux-2.6.32.48/arch/powerpc/kernel/dma.c 2011-11-08 19:02:43.000000000 -0500
2508+++ linux-2.6.32.48/arch/powerpc/kernel/dma.c 2011-11-15 19:59:42.000000000 -0500
2509@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513-struct dma_map_ops dma_direct_ops = {
2514+const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518diff -urNp linux-2.6.32.48/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.48/arch/powerpc/kernel/dma-iommu.c
2519--- linux-2.6.32.48/arch/powerpc/kernel/dma-iommu.c 2011-11-08 19:02:43.000000000 -0500
2520+++ linux-2.6.32.48/arch/powerpc/kernel/dma-iommu.c 2011-11-15 19:59:42.000000000 -0500
2521@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530diff -urNp linux-2.6.32.48/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.48/arch/powerpc/kernel/dma-swiotlb.c
2531--- linux-2.6.32.48/arch/powerpc/kernel/dma-swiotlb.c 2011-11-08 19:02:43.000000000 -0500
2532+++ linux-2.6.32.48/arch/powerpc/kernel/dma-swiotlb.c 2011-11-15 19:59:42.000000000 -0500
2533@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537-struct dma_map_ops swiotlb_dma_ops = {
2538+const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542diff -urNp linux-2.6.32.48/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.48/arch/powerpc/kernel/exceptions-64e.S
2543--- linux-2.6.32.48/arch/powerpc/kernel/exceptions-64e.S 2011-11-08 19:02:43.000000000 -0500
2544+++ linux-2.6.32.48/arch/powerpc/kernel/exceptions-64e.S 2011-11-15 19:59:42.000000000 -0500
2545@@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549+ bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553@@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557-1: bl .save_nvgprs
2558- mr r5,r3
2559+1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563diff -urNp linux-2.6.32.48/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.48/arch/powerpc/kernel/exceptions-64s.S
2564--- linux-2.6.32.48/arch/powerpc/kernel/exceptions-64s.S 2011-11-08 19:02:43.000000000 -0500
2565+++ linux-2.6.32.48/arch/powerpc/kernel/exceptions-64s.S 2011-11-15 19:59:42.000000000 -0500
2566@@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570+ bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574- bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578diff -urNp linux-2.6.32.48/arch/powerpc/kernel/ibmebus.c linux-2.6.32.48/arch/powerpc/kernel/ibmebus.c
2579--- linux-2.6.32.48/arch/powerpc/kernel/ibmebus.c 2011-11-08 19:02:43.000000000 -0500
2580+++ linux-2.6.32.48/arch/powerpc/kernel/ibmebus.c 2011-11-15 19:59:42.000000000 -0500
2581@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585-static struct dma_map_ops ibmebus_dma_ops = {
2586+static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590diff -urNp linux-2.6.32.48/arch/powerpc/kernel/kgdb.c linux-2.6.32.48/arch/powerpc/kernel/kgdb.c
2591--- linux-2.6.32.48/arch/powerpc/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
2592+++ linux-2.6.32.48/arch/powerpc/kernel/kgdb.c 2011-11-15 19:59:42.000000000 -0500
2593@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606-struct kgdb_arch arch_kgdb_ops = {
2607+const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611diff -urNp linux-2.6.32.48/arch/powerpc/kernel/module_32.c linux-2.6.32.48/arch/powerpc/kernel/module_32.c
2612--- linux-2.6.32.48/arch/powerpc/kernel/module_32.c 2011-11-08 19:02:43.000000000 -0500
2613+++ linux-2.6.32.48/arch/powerpc/kernel/module_32.c 2011-11-15 19:59:42.000000000 -0500
2614@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618- printk("Module doesn't contain .plt or .init.plt sections.\n");
2619+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627- if (location >= mod->module_core
2628- && location < mod->module_core + mod->core_size)
2629+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632- else
2633+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636+ else {
2637+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638+ return ~0UL;
2639+ }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643diff -urNp linux-2.6.32.48/arch/powerpc/kernel/module.c linux-2.6.32.48/arch/powerpc/kernel/module.c
2644--- linux-2.6.32.48/arch/powerpc/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
2645+++ linux-2.6.32.48/arch/powerpc/kernel/module.c 2011-11-15 19:59:42.000000000 -0500
2646@@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650+#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656+ return vmalloc(size);
2657+}
2658+
2659+void *module_alloc_exec(unsigned long size)
2660+#else
2661+void *module_alloc(unsigned long size)
2662+#endif
2663+
2664+{
2665+ if (size == 0)
2666+ return NULL;
2667+
2668 return vmalloc_exec(size);
2669 }
2670
2671@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675+#ifdef CONFIG_PAX_KERNEXEC
2676+void module_free_exec(struct module *mod, void *module_region)
2677+{
2678+ module_free(mod, module_region);
2679+}
2680+#endif
2681+
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685diff -urNp linux-2.6.32.48/arch/powerpc/kernel/pci-common.c linux-2.6.32.48/arch/powerpc/kernel/pci-common.c
2686--- linux-2.6.32.48/arch/powerpc/kernel/pci-common.c 2011-11-08 19:02:43.000000000 -0500
2687+++ linux-2.6.32.48/arch/powerpc/kernel/pci-common.c 2011-11-15 19:59:42.000000000 -0500
2688@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701-struct dma_map_ops *get_pci_dma_ops(void)
2702+const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706diff -urNp linux-2.6.32.48/arch/powerpc/kernel/process.c linux-2.6.32.48/arch/powerpc/kernel/process.c
2707--- linux-2.6.32.48/arch/powerpc/kernel/process.c 2011-11-08 19:02:43.000000000 -0500
2708+++ linux-2.6.32.48/arch/powerpc/kernel/process.c 2011-11-15 19:59:42.000000000 -0500
2709@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728- printk(" (%pS)",
2729+ printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746-
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751- return sp & ~0xf;
2752-}
2753-
2754-static inline unsigned long brk_rnd(void)
2755-{
2756- unsigned long rnd = 0;
2757-
2758- /* 8MB for 32bit, 1GB for 64bit */
2759- if (is_32bit_task())
2760- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761- else
2762- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763-
2764- return rnd << PAGE_SHIFT;
2765-}
2766-
2767-unsigned long arch_randomize_brk(struct mm_struct *mm)
2768-{
2769- unsigned long base = mm->brk;
2770- unsigned long ret;
2771-
2772-#ifdef CONFIG_PPC_STD_MMU_64
2773- /*
2774- * If we are using 1TB segments and we are allowed to randomise
2775- * the heap, we can put it above 1TB so it is backed by a 1TB
2776- * segment. Otherwise the heap will be in the bottom 1TB
2777- * which always uses 256MB segments and this may result in a
2778- * performance penalty.
2779- */
2780- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782-#endif
2783-
2784- ret = PAGE_ALIGN(base + brk_rnd());
2785-
2786- if (ret < mm->brk)
2787- return mm->brk;
2788-
2789- return ret;
2790-}
2791-
2792-unsigned long randomize_et_dyn(unsigned long base)
2793-{
2794- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795-
2796- if (ret < base)
2797- return base;
2798-
2799- return ret;
2800-}
2801diff -urNp linux-2.6.32.48/arch/powerpc/kernel/ptrace.c linux-2.6.32.48/arch/powerpc/kernel/ptrace.c
2802--- linux-2.6.32.48/arch/powerpc/kernel/ptrace.c 2011-11-08 19:02:43.000000000 -0500
2803+++ linux-2.6.32.48/arch/powerpc/kernel/ptrace.c 2011-11-15 19:59:42.000000000 -0500
2804@@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817- tmp = ptrace_get_reg(child, (int) index);
2818+ tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822diff -urNp linux-2.6.32.48/arch/powerpc/kernel/signal_32.c linux-2.6.32.48/arch/powerpc/kernel/signal_32.c
2823--- linux-2.6.32.48/arch/powerpc/kernel/signal_32.c 2011-11-08 19:02:43.000000000 -0500
2824+++ linux-2.6.32.48/arch/powerpc/kernel/signal_32.c 2011-11-15 19:59:42.000000000 -0500
2825@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834diff -urNp linux-2.6.32.48/arch/powerpc/kernel/signal_64.c linux-2.6.32.48/arch/powerpc/kernel/signal_64.c
2835--- linux-2.6.32.48/arch/powerpc/kernel/signal_64.c 2011-11-08 19:02:43.000000000 -0500
2836+++ linux-2.6.32.48/arch/powerpc/kernel/signal_64.c 2011-11-15 19:59:42.000000000 -0500
2837@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846diff -urNp linux-2.6.32.48/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.48/arch/powerpc/kernel/sys_ppc32.c
2847--- linux-2.6.32.48/arch/powerpc/kernel/sys_ppc32.c 2011-11-08 19:02:43.000000000 -0500
2848+++ linux-2.6.32.48/arch/powerpc/kernel/sys_ppc32.c 2011-11-15 19:59:42.000000000 -0500
2849@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
2862diff -urNp linux-2.6.32.48/arch/powerpc/kernel/traps.c linux-2.6.32.48/arch/powerpc/kernel/traps.c
2863--- linux-2.6.32.48/arch/powerpc/kernel/traps.c 2011-11-08 19:02:43.000000000 -0500
2864+++ linux-2.6.32.48/arch/powerpc/kernel/traps.c 2011-11-15 19:59:42.000000000 -0500
2865@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869+extern void gr_handle_kernel_exploit(void);
2870+
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878+ gr_handle_kernel_exploit();
2879+
2880 oops_exit();
2881 do_exit(err);
2882
2883diff -urNp linux-2.6.32.48/arch/powerpc/kernel/vdso.c linux-2.6.32.48/arch/powerpc/kernel/vdso.c
2884--- linux-2.6.32.48/arch/powerpc/kernel/vdso.c 2011-11-08 19:02:43.000000000 -0500
2885+++ linux-2.6.32.48/arch/powerpc/kernel/vdso.c 2011-11-15 19:59:42.000000000 -0500
2886@@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890+#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898- current->mm->context.vdso_base = 0;
2899+ current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907- 0, 0);
2908+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
2912diff -urNp linux-2.6.32.48/arch/powerpc/kernel/vio.c linux-2.6.32.48/arch/powerpc/kernel/vio.c
2913--- linux-2.6.32.48/arch/powerpc/kernel/vio.c 2011-11-08 19:02:43.000000000 -0500
2914+++ linux-2.6.32.48/arch/powerpc/kernel/vio.c 2011-11-15 19:59:42.000000000 -0500
2915@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919-struct dma_map_ops vio_dma_mapping_ops = {
2920+static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925+ .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937diff -urNp linux-2.6.32.48/arch/powerpc/lib/usercopy_64.c linux-2.6.32.48/arch/powerpc/lib/usercopy_64.c
2938--- linux-2.6.32.48/arch/powerpc/lib/usercopy_64.c 2011-11-08 19:02:43.000000000 -0500
2939+++ linux-2.6.32.48/arch/powerpc/lib/usercopy_64.c 2011-11-15 19:59:42.000000000 -0500
2940@@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945-{
2946- if (likely(access_ok(VERIFY_READ, from, n)))
2947- n = __copy_from_user(to, from, n);
2948- else
2949- memset(to, 0, n);
2950- return n;
2951-}
2952-
2953-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954-{
2955- if (likely(access_ok(VERIFY_WRITE, to, n)))
2956- n = __copy_to_user(to, from, n);
2957- return n;
2958-}
2959-
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967-EXPORT_SYMBOL(copy_from_user);
2968-EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971diff -urNp linux-2.6.32.48/arch/powerpc/Makefile linux-2.6.32.48/arch/powerpc/Makefile
2972--- linux-2.6.32.48/arch/powerpc/Makefile 2011-11-08 19:02:43.000000000 -0500
2973+++ linux-2.6.32.48/arch/powerpc/Makefile 2011-11-15 19:59:42.000000000 -0500
2974@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978+cflags-y += -Wno-sign-compare -Wno-extra
2979+
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983diff -urNp linux-2.6.32.48/arch/powerpc/mm/fault.c linux-2.6.32.48/arch/powerpc/mm/fault.c
2984--- linux-2.6.32.48/arch/powerpc/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
2985+++ linux-2.6.32.48/arch/powerpc/mm/fault.c 2011-11-15 19:59:42.000000000 -0500
2986@@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990+#include <linux/slab.h>
2991+#include <linux/pagemap.h>
2992+#include <linux/compiler.h>
2993+#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997@@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001+#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+/*
3011+ * PaX: decide what to do with offenders (regs->nip = fault address)
3012+ *
3013+ * returns 1 when task should be killed
3014+ */
3015+static int pax_handle_fetch_fault(struct pt_regs *regs)
3016+{
3017+ return 1;
3018+}
3019+
3020+void pax_report_insns(void *pc, void *sp)
3021+{
3022+ unsigned long i;
3023+
3024+ printk(KERN_ERR "PAX: bytes at PC: ");
3025+ for (i = 0; i < 5; i++) {
3026+ unsigned int c;
3027+ if (get_user(c, (unsigned int __user *)pc+i))
3028+ printk(KERN_CONT "???????? ");
3029+ else
3030+ printk(KERN_CONT "%08x ", c);
3031+ }
3032+ printk("\n");
3033+}
3034+#endif
3035+
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043- error_code &= 0x48200000;
3044+ error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048@@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052- if (error_code & 0x10000000)
3053+ if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057@@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061- if (error_code & DSISR_PROTFAULT)
3062+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066@@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070+
3071+#ifdef CONFIG_PAX_PAGEEXEC
3072+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073+#ifdef CONFIG_PPC_STD_MMU
3074+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075+#else
3076+ if (is_exec && regs->nip == address) {
3077+#endif
3078+ switch (pax_handle_fetch_fault(regs)) {
3079+ }
3080+
3081+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082+ do_group_exit(SIGKILL);
3083+ }
3084+ }
3085+#endif
3086+
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
3090diff -urNp linux-2.6.32.48/arch/powerpc/mm/mem.c linux-2.6.32.48/arch/powerpc/mm/mem.c
3091--- linux-2.6.32.48/arch/powerpc/mm/mem.c 2011-11-08 19:02:43.000000000 -0500
3092+++ linux-2.6.32.48/arch/powerpc/mm/mem.c 2011-11-15 19:59:42.000000000 -0500
3093@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097- int i;
3098+ unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102diff -urNp linux-2.6.32.48/arch/powerpc/mm/mmap_64.c linux-2.6.32.48/arch/powerpc/mm/mmap_64.c
3103--- linux-2.6.32.48/arch/powerpc/mm/mmap_64.c 2011-11-08 19:02:43.000000000 -0500
3104+++ linux-2.6.32.48/arch/powerpc/mm/mmap_64.c 2011-11-15 19:59:42.000000000 -0500
3105@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109+
3110+#ifdef CONFIG_PAX_RANDMMAP
3111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3112+ mm->mmap_base += mm->delta_mmap;
3113+#endif
3114+
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119+
3120+#ifdef CONFIG_PAX_RANDMMAP
3121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123+#endif
3124+
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
3128diff -urNp linux-2.6.32.48/arch/powerpc/mm/slice.c linux-2.6.32.48/arch/powerpc/mm/slice.c
3129--- linux-2.6.32.48/arch/powerpc/mm/slice.c 2011-11-08 19:02:43.000000000 -0500
3130+++ linux-2.6.32.48/arch/powerpc/mm/slice.c 2011-11-15 19:59:42.000000000 -0500
3131@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135- return (!vma || (addr + len) <= vma->vm_start);
3136+ return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140@@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144- if (!vma || addr + len <= vma->vm_start) {
3145+ if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153- addr = mm->mmap_base;
3154- while (addr > len) {
3155+ if (mm->mmap_base < len)
3156+ addr = -ENOMEM;
3157+ else
3158+ addr = mm->mmap_base - len;
3159+
3160+ while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171- if (!vma || (addr + len) <= vma->vm_start) {
3172+ if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180- addr = vma->vm_start;
3181+ addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189+#ifdef CONFIG_PAX_RANDMMAP
3190+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191+ addr = 0;
3192+#endif
3193+
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
3197diff -urNp linux-2.6.32.48/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.48/arch/powerpc/platforms/52xx/lite5200_pm.c
3198--- linux-2.6.32.48/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-11-08 19:02:43.000000000 -0500
3199+++ linux-2.6.32.48/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-11-15 19:59:42.000000000 -0500
3200@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204-static struct platform_suspend_ops lite5200_pm_ops = {
3205+static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209diff -urNp linux-2.6.32.48/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.48/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210--- linux-2.6.32.48/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-11-08 19:02:43.000000000 -0500
3211+++ linux-2.6.32.48/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-11-15 19:59:42.000000000 -0500
3212@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216-static struct platform_suspend_ops mpc52xx_pm_ops = {
3217+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221diff -urNp linux-2.6.32.48/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.48/arch/powerpc/platforms/83xx/suspend.c
3222--- linux-2.6.32.48/arch/powerpc/platforms/83xx/suspend.c 2011-11-08 19:02:43.000000000 -0500
3223+++ linux-2.6.32.48/arch/powerpc/platforms/83xx/suspend.c 2011-11-15 19:59:42.000000000 -0500
3224@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233diff -urNp linux-2.6.32.48/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.48/arch/powerpc/platforms/cell/iommu.c
3234--- linux-2.6.32.48/arch/powerpc/platforms/cell/iommu.c 2011-11-08 19:02:43.000000000 -0500
3235+++ linux-2.6.32.48/arch/powerpc/platforms/cell/iommu.c 2011-11-15 19:59:42.000000000 -0500
3236@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240-struct dma_map_ops dma_iommu_fixed_ops = {
3241+const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245diff -urNp linux-2.6.32.48/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.48/arch/powerpc/platforms/ps3/system-bus.c
3246--- linux-2.6.32.48/arch/powerpc/platforms/ps3/system-bus.c 2011-11-08 19:02:43.000000000 -0500
3247+++ linux-2.6.32.48/arch/powerpc/platforms/ps3/system-bus.c 2011-11-15 19:59:42.000000000 -0500
3248@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252-static struct dma_map_ops ps3_sb_dma_ops = {
3253+static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261-static struct dma_map_ops ps3_ioc0_dma_ops = {
3262+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266diff -urNp linux-2.6.32.48/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.48/arch/powerpc/platforms/pseries/Kconfig
3267--- linux-2.6.32.48/arch/powerpc/platforms/pseries/Kconfig 2011-11-08 19:02:43.000000000 -0500
3268+++ linux-2.6.32.48/arch/powerpc/platforms/pseries/Kconfig 2011-11-15 19:59:42.000000000 -0500
3269@@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273+ select PCI_MSI
3274+ select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278diff -urNp linux-2.6.32.48/arch/s390/include/asm/elf.h linux-2.6.32.48/arch/s390/include/asm/elf.h
3279--- linux-2.6.32.48/arch/s390/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
3280+++ linux-2.6.32.48/arch/s390/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
3281@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287+
3288+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295diff -urNp linux-2.6.32.48/arch/s390/include/asm/setup.h linux-2.6.32.48/arch/s390/include/asm/setup.h
3296--- linux-2.6.32.48/arch/s390/include/asm/setup.h 2011-11-08 19:02:43.000000000 -0500
3297+++ linux-2.6.32.48/arch/s390/include/asm/setup.h 2011-11-15 19:59:42.000000000 -0500
3298@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302-extern unsigned int switch_amode;
3303+#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309-extern unsigned int s390_noexec;
3310+#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314diff -urNp linux-2.6.32.48/arch/s390/include/asm/uaccess.h linux-2.6.32.48/arch/s390/include/asm/uaccess.h
3315--- linux-2.6.32.48/arch/s390/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
3316+++ linux-2.6.32.48/arch/s390/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
3317@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321+
3322+ if ((long)n < 0)
3323+ return n;
3324+
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332+ if ((long)n < 0)
3333+ return n;
3334+
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342+
3343+ if ((long)n < 0)
3344+ return n;
3345+
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
3349diff -urNp linux-2.6.32.48/arch/s390/Kconfig linux-2.6.32.48/arch/s390/Kconfig
3350--- linux-2.6.32.48/arch/s390/Kconfig 2011-11-08 19:02:43.000000000 -0500
3351+++ linux-2.6.32.48/arch/s390/Kconfig 2011-11-15 19:59:42.000000000 -0500
3352@@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356+ default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359- space. The kernel parameter switch_amode=on will enable this feature,
3360- default is disabled. Enabling this (via kernel parameter) on machines
3361- earlier than IBM System z9-109 EC/BC will reduce system performance.
3362+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363+ will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366- protection option below. Enabling the execute protection via the
3367- noexec kernel parameter will also switch the addressing modes,
3368- independent of the switch_amode kernel parameter.
3369+ protection option below. Enabling the execute protection will also
3370+ switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375+ default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380- The kernel parameter noexec=on will enable this feature and also
3381- switch the addressing modes, default is disabled. Enabling this (via
3382- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383- will reduce system performance.
3384+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385+ reduce system performance.
3386
3387 comment "Code generation options"
3388
3389diff -urNp linux-2.6.32.48/arch/s390/kernel/module.c linux-2.6.32.48/arch/s390/kernel/module.c
3390--- linux-2.6.32.48/arch/s390/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
3391+++ linux-2.6.32.48/arch/s390/kernel/module.c 2011-11-15 19:59:42.000000000 -0500
3392@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396- me->core_size = ALIGN(me->core_size, 4);
3397- me->arch.got_offset = me->core_size;
3398- me->core_size += me->arch.got_size;
3399- me->arch.plt_offset = me->core_size;
3400- me->core_size += me->arch.plt_size;
3401+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402+ me->arch.got_offset = me->core_size_rw;
3403+ me->core_size_rw += me->arch.got_size;
3404+ me->arch.plt_offset = me->core_size_rx;
3405+ me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413- gotent = me->module_core + me->arch.got_offset +
3414+ gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422- (val + (Elf_Addr) me->module_core - loc) >> 1;
3423+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431- ip = me->module_core + me->arch.plt_offset +
3432+ ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440- val = (Elf_Addr) me->module_core +
3441+ val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449- ((Elf_Addr) me->module_core + me->arch.got_offset);
3450+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463diff -urNp linux-2.6.32.48/arch/s390/kernel/setup.c linux-2.6.32.48/arch/s390/kernel/setup.c
3464--- linux-2.6.32.48/arch/s390/kernel/setup.c 2011-11-08 19:02:43.000000000 -0500
3465+++ linux-2.6.32.48/arch/s390/kernel/setup.c 2011-11-15 19:59:42.000000000 -0500
3466@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470-unsigned int switch_amode = 0;
3471-EXPORT_SYMBOL_GPL(switch_amode);
3472-
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480-
3481-/*
3482- * Switch kernel/user addressing modes?
3483- */
3484-static int __init early_parse_switch_amode(char *p)
3485-{
3486- switch_amode = 1;
3487- return 0;
3488-}
3489-early_param("switch_amode", early_parse_switch_amode);
3490-
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498-#ifdef CONFIG_S390_EXEC_PROTECT
3499-unsigned int s390_noexec = 0;
3500-EXPORT_SYMBOL_GPL(s390_noexec);
3501-
3502-/*
3503- * Enable execute protection?
3504- */
3505-static int __init early_parse_noexec(char *p)
3506-{
3507- if (!strncmp(p, "off", 3))
3508- return 0;
3509- switch_amode = 1;
3510- s390_noexec = 1;
3511- return 0;
3512-}
3513-early_param("noexec", early_parse_noexec);
3514-#endif /* CONFIG_S390_EXEC_PROTECT */
3515-
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519diff -urNp linux-2.6.32.48/arch/s390/mm/mmap.c linux-2.6.32.48/arch/s390/mm/mmap.c
3520--- linux-2.6.32.48/arch/s390/mm/mmap.c 2011-11-08 19:02:43.000000000 -0500
3521+++ linux-2.6.32.48/arch/s390/mm/mmap.c 2011-11-15 19:59:42.000000000 -0500
3522@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526+
3527+#ifdef CONFIG_PAX_RANDMMAP
3528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3529+ mm->mmap_base += mm->delta_mmap;
3530+#endif
3531+
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536+
3537+#ifdef CONFIG_PAX_RANDMMAP
3538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540+#endif
3541+
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549+
3550+#ifdef CONFIG_PAX_RANDMMAP
3551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3552+ mm->mmap_base += mm->delta_mmap;
3553+#endif
3554+
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559+
3560+#ifdef CONFIG_PAX_RANDMMAP
3561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3562+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563+#endif
3564+
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568diff -urNp linux-2.6.32.48/arch/score/include/asm/system.h linux-2.6.32.48/arch/score/include/asm/system.h
3569--- linux-2.6.32.48/arch/score/include/asm/system.h 2011-11-08 19:02:43.000000000 -0500
3570+++ linux-2.6.32.48/arch/score/include/asm/system.h 2011-11-15 19:59:42.000000000 -0500
3571@@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575-extern unsigned long arch_align_stack(unsigned long sp);
3576+#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580diff -urNp linux-2.6.32.48/arch/score/kernel/process.c linux-2.6.32.48/arch/score/kernel/process.c
3581--- linux-2.6.32.48/arch/score/kernel/process.c 2011-11-08 19:02:43.000000000 -0500
3582+++ linux-2.6.32.48/arch/score/kernel/process.c 2011-11-15 19:59:42.000000000 -0500
3583@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587-
3588-unsigned long arch_align_stack(unsigned long sp)
3589-{
3590- return sp;
3591-}
3592diff -urNp linux-2.6.32.48/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.48/arch/sh/boards/mach-hp6xx/pm.c
3593--- linux-2.6.32.48/arch/sh/boards/mach-hp6xx/pm.c 2011-11-08 19:02:43.000000000 -0500
3594+++ linux-2.6.32.48/arch/sh/boards/mach-hp6xx/pm.c 2011-11-15 19:59:42.000000000 -0500
3595@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599-static struct platform_suspend_ops hp6x0_pm_ops = {
3600+static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604diff -urNp linux-2.6.32.48/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.48/arch/sh/kernel/cpu/sh4/sq.c
3605--- linux-2.6.32.48/arch/sh/kernel/cpu/sh4/sq.c 2011-11-08 19:02:43.000000000 -0500
3606+++ linux-2.6.32.48/arch/sh/kernel/cpu/sh4/sq.c 2011-11-15 19:59:42.000000000 -0500
3607@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611-static struct sysfs_ops sq_sysfs_ops = {
3612+static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616diff -urNp linux-2.6.32.48/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.48/arch/sh/kernel/cpu/shmobile/pm.c
3617--- linux-2.6.32.48/arch/sh/kernel/cpu/shmobile/pm.c 2011-11-08 19:02:43.000000000 -0500
3618+++ linux-2.6.32.48/arch/sh/kernel/cpu/shmobile/pm.c 2011-11-15 19:59:42.000000000 -0500
3619@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623-static struct platform_suspend_ops sh_pm_ops = {
3624+static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628diff -urNp linux-2.6.32.48/arch/sh/kernel/kgdb.c linux-2.6.32.48/arch/sh/kernel/kgdb.c
3629--- linux-2.6.32.48/arch/sh/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
3630+++ linux-2.6.32.48/arch/sh/kernel/kgdb.c 2011-11-15 19:59:42.000000000 -0500
3631@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635-struct kgdb_arch arch_kgdb_ops = {
3636+const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640diff -urNp linux-2.6.32.48/arch/sh/mm/mmap.c linux-2.6.32.48/arch/sh/mm/mmap.c
3641--- linux-2.6.32.48/arch/sh/mm/mmap.c 2011-11-08 19:02:43.000000000 -0500
3642+++ linux-2.6.32.48/arch/sh/mm/mmap.c 2011-11-15 19:59:42.000000000 -0500
3643@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647- if (TASK_SIZE - len >= addr &&
3648- (!vma || addr + len <= vma->vm_start))
3649+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653@@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657- if (likely(!vma || addr + len <= vma->vm_start)) {
3658+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666- if (TASK_SIZE - len >= addr &&
3667- (!vma || addr + len <= vma->vm_start))
3668+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676- if (!vma || addr <= vma->vm_start) {
3677+ if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685- addr = mm->mmap_base-len;
3686- if (do_colour_align)
3687- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688+ addr = mm->mmap_base - len;
3689
3690 do {
3691+ if (do_colour_align)
3692+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699- if (likely(!vma || addr+len <= vma->vm_start)) {
3700+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708- addr = vma->vm_start-len;
3709- if (do_colour_align)
3710- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711- } while (likely(len < vma->vm_start));
3712+ addr = skip_heap_stack_gap(vma, len);
3713+ } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717diff -urNp linux-2.6.32.48/arch/sparc/include/asm/atomic_64.h linux-2.6.32.48/arch/sparc/include/asm/atomic_64.h
3718--- linux-2.6.32.48/arch/sparc/include/asm/atomic_64.h 2011-11-08 19:02:43.000000000 -0500
3719+++ linux-2.6.32.48/arch/sparc/include/asm/atomic_64.h 2011-11-15 19:59:42.000000000 -0500
3720@@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725+{
3726+ return v->counter;
3727+}
3728 #define atomic64_read(v) ((v)->counter)
3729+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730+{
3731+ return v->counter;
3732+}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736+{
3737+ v->counter = i;
3738+}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741+{
3742+ v->counter = i;
3743+}
3744
3745 extern void atomic_add(int, atomic_t *);
3746+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766+{
3767+ return atomic_add_ret_unchecked(1, v);
3768+}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771+{
3772+ return atomic64_add_ret_unchecked(1, v);
3773+}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780+{
3781+ return atomic_add_ret_unchecked(i, v);
3782+}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785+{
3786+ return atomic64_add_ret_unchecked(i, v);
3787+}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796+{
3797+ return atomic_inc_return_unchecked(v) == 0;
3798+}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807+{
3808+ atomic_add_unchecked(1, v);
3809+}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812+{
3813+ atomic64_add_unchecked(1, v);
3814+}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818+{
3819+ atomic_sub_unchecked(1, v);
3820+}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823+{
3824+ atomic64_sub_unchecked(1, v);
3825+}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832+{
3833+ return cmpxchg(&v->counter, old, new);
3834+}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837+{
3838+ return xchg(&v->counter, new);
3839+}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843- int c, old;
3844+ int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847- if (unlikely(c == (u)))
3848+ if (unlikely(c == u))
3849 break;
3850- old = atomic_cmpxchg((v), c, c + (a));
3851+
3852+ asm volatile("addcc %2, %0, %0\n"
3853+
3854+#ifdef CONFIG_PAX_REFCOUNT
3855+ "tvs %%icc, 6\n"
3856+#endif
3857+
3858+ : "=r" (new)
3859+ : "0" (c), "ir" (a)
3860+ : "cc");
3861+
3862+ old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867- return c != (u);
3868+ return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877+{
3878+ return xchg(&v->counter, new);
3879+}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883- long c, old;
3884+ long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887- if (unlikely(c == (u)))
3888+ if (unlikely(c == u))
3889 break;
3890- old = atomic64_cmpxchg((v), c, c + (a));
3891+
3892+ asm volatile("addcc %2, %0, %0\n"
3893+
3894+#ifdef CONFIG_PAX_REFCOUNT
3895+ "tvs %%xcc, 6\n"
3896+#endif
3897+
3898+ : "=r" (new)
3899+ : "0" (c), "ir" (a)
3900+ : "cc");
3901+
3902+ old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907- return c != (u);
3908+ return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3912diff -urNp linux-2.6.32.48/arch/sparc/include/asm/cache.h linux-2.6.32.48/arch/sparc/include/asm/cache.h
3913--- linux-2.6.32.48/arch/sparc/include/asm/cache.h 2011-11-08 19:02:43.000000000 -0500
3914+++ linux-2.6.32.48/arch/sparc/include/asm/cache.h 2011-11-15 19:59:42.000000000 -0500
3915@@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919-#define L1_CACHE_BYTES 32
3920+#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924diff -urNp linux-2.6.32.48/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.48/arch/sparc/include/asm/dma-mapping.h
3925--- linux-2.6.32.48/arch/sparc/include/asm/dma-mapping.h 2011-11-08 19:02:43.000000000 -0500
3926+++ linux-2.6.32.48/arch/sparc/include/asm/dma-mapping.h 2011-11-15 19:59:42.000000000 -0500
3927@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944- struct dma_map_ops *ops = get_dma_ops(dev);
3945+ const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953- struct dma_map_ops *ops = get_dma_ops(dev);
3954+ const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958diff -urNp linux-2.6.32.48/arch/sparc/include/asm/elf_32.h linux-2.6.32.48/arch/sparc/include/asm/elf_32.h
3959--- linux-2.6.32.48/arch/sparc/include/asm/elf_32.h 2011-11-08 19:02:43.000000000 -0500
3960+++ linux-2.6.32.48/arch/sparc/include/asm/elf_32.h 2011-11-15 19:59:42.000000000 -0500
3961@@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965+#ifdef CONFIG_PAX_ASLR
3966+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967+
3968+#define PAX_DELTA_MMAP_LEN 16
3969+#define PAX_DELTA_STACK_LEN 16
3970+#endif
3971+
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975diff -urNp linux-2.6.32.48/arch/sparc/include/asm/elf_64.h linux-2.6.32.48/arch/sparc/include/asm/elf_64.h
3976--- linux-2.6.32.48/arch/sparc/include/asm/elf_64.h 2011-11-08 19:02:43.000000000 -0500
3977+++ linux-2.6.32.48/arch/sparc/include/asm/elf_64.h 2011-11-15 19:59:42.000000000 -0500
3978@@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982+#ifdef CONFIG_PAX_ASLR
3983+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984+
3985+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987+#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991diff -urNp linux-2.6.32.48/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.48/arch/sparc/include/asm/pgtable_32.h
3992--- linux-2.6.32.48/arch/sparc/include/asm/pgtable_32.h 2011-11-08 19:02:43.000000000 -0500
3993+++ linux-2.6.32.48/arch/sparc/include/asm/pgtable_32.h 2011-11-15 19:59:42.000000000 -0500
3994@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998+
3999+#ifdef CONFIG_PAX_PAGEEXEC
4000+BTFIXUPDEF_INT(page_shared_noexec)
4001+BTFIXUPDEF_INT(page_copy_noexec)
4002+BTFIXUPDEF_INT(page_readonly_noexec)
4003+#endif
4004+
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012+#ifdef CONFIG_PAX_PAGEEXEC
4013+extern pgprot_t PAGE_SHARED_NOEXEC;
4014+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016+#else
4017+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018+# define PAGE_COPY_NOEXEC PAGE_COPY
4019+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020+#endif
4021+
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025diff -urNp linux-2.6.32.48/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.48/arch/sparc/include/asm/pgtsrmmu.h
4026--- linux-2.6.32.48/arch/sparc/include/asm/pgtsrmmu.h 2011-11-08 19:02:43.000000000 -0500
4027+++ linux-2.6.32.48/arch/sparc/include/asm/pgtsrmmu.h 2011-11-15 19:59:42.000000000 -0500
4028@@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032+
4033+#ifdef CONFIG_PAX_PAGEEXEC
4034+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037+#endif
4038+
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042diff -urNp linux-2.6.32.48/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.48/arch/sparc/include/asm/spinlock_64.h
4043--- linux-2.6.32.48/arch/sparc/include/asm/spinlock_64.h 2011-11-08 19:02:43.000000000 -0500
4044+++ linux-2.6.32.48/arch/sparc/include/asm/spinlock_64.h 2011-11-15 19:59:42.000000000 -0500
4045@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049-static void inline arch_read_lock(raw_rwlock_t *lock)
4050+static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057-"4: add %0, 1, %1\n"
4058+"4: addcc %0, 1, %1\n"
4059+
4060+#ifdef CONFIG_PAX_REFCOUNT
4061+" tvs %%icc, 6\n"
4062+#endif
4063+
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071- : "memory");
4072+ : "memory", "cc");
4073 }
4074
4075-static int inline arch_read_trylock(raw_rwlock_t *lock)
4076+static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084-" add %0, 1, %1\n"
4085+" addcc %0, 1, %1\n"
4086+
4087+#ifdef CONFIG_PAX_REFCOUNT
4088+" tvs %%icc, 6\n"
4089+#endif
4090+
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098-static void inline arch_read_unlock(raw_rwlock_t *lock)
4099+static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105-" sub %0, 1, %1\n"
4106+" subcc %0, 1, %1\n"
4107+
4108+#ifdef CONFIG_PAX_REFCOUNT
4109+" tvs %%icc, 6\n"
4110+#endif
4111+
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119-static void inline arch_write_lock(raw_rwlock_t *lock)
4120+static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128-static void inline arch_write_unlock(raw_rwlock_t *lock)
4129+static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137-static int inline arch_write_trylock(raw_rwlock_t *lock)
4138+static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142diff -urNp linux-2.6.32.48/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.48/arch/sparc/include/asm/thread_info_32.h
4143--- linux-2.6.32.48/arch/sparc/include/asm/thread_info_32.h 2011-11-08 19:02:43.000000000 -0500
4144+++ linux-2.6.32.48/arch/sparc/include/asm/thread_info_32.h 2011-11-15 19:59:42.000000000 -0500
4145@@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149+
4150+ unsigned long lowest_stack;
4151 };
4152
4153 /*
4154diff -urNp linux-2.6.32.48/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.48/arch/sparc/include/asm/thread_info_64.h
4155--- linux-2.6.32.48/arch/sparc/include/asm/thread_info_64.h 2011-11-08 19:02:43.000000000 -0500
4156+++ linux-2.6.32.48/arch/sparc/include/asm/thread_info_64.h 2011-11-15 19:59:42.000000000 -0500
4157@@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161+ unsigned long lowest_stack;
4162+
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166diff -urNp linux-2.6.32.48/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.48/arch/sparc/include/asm/uaccess_32.h
4167--- linux-2.6.32.48/arch/sparc/include/asm/uaccess_32.h 2011-11-08 19:02:43.000000000 -0500
4168+++ linux-2.6.32.48/arch/sparc/include/asm/uaccess_32.h 2011-11-15 19:59:42.000000000 -0500
4169@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173- if (n && __access_ok((unsigned long) to, n))
4174+ if ((long)n < 0)
4175+ return n;
4176+
4177+ if (n && __access_ok((unsigned long) to, n)) {
4178+ if (!__builtin_constant_p(n))
4179+ check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181- else
4182+ } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188+ if ((long)n < 0)
4189+ return n;
4190+
4191+ if (!__builtin_constant_p(n))
4192+ check_object_size(from, n, true);
4193+
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199- if (n && __access_ok((unsigned long) from, n))
4200+ if ((long)n < 0)
4201+ return n;
4202+
4203+ if (n && __access_ok((unsigned long) from, n)) {
4204+ if (!__builtin_constant_p(n))
4205+ check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207- else
4208+ } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214+ if ((long)n < 0)
4215+ return n;
4216+
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220diff -urNp linux-2.6.32.48/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.48/arch/sparc/include/asm/uaccess_64.h
4221--- linux-2.6.32.48/arch/sparc/include/asm/uaccess_64.h 2011-11-08 19:02:43.000000000 -0500
4222+++ linux-2.6.32.48/arch/sparc/include/asm/uaccess_64.h 2011-11-15 19:59:42.000000000 -0500
4223@@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227+#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235- unsigned long ret = ___copy_from_user(to, from, size);
4236+ unsigned long ret;
4237
4238+ if ((long)size < 0 || size > INT_MAX)
4239+ return size;
4240+
4241+ if (!__builtin_constant_p(size))
4242+ check_object_size(to, size, false);
4243+
4244+ ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252- unsigned long ret = ___copy_to_user(to, from, size);
4253+ unsigned long ret;
4254+
4255+ if ((long)size < 0 || size > INT_MAX)
4256+ return size;
4257+
4258+ if (!__builtin_constant_p(size))
4259+ check_object_size(from, size, true);
4260
4261+ ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265diff -urNp linux-2.6.32.48/arch/sparc/include/asm/uaccess.h linux-2.6.32.48/arch/sparc/include/asm/uaccess.h
4266--- linux-2.6.32.48/arch/sparc/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
4267+++ linux-2.6.32.48/arch/sparc/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
4268@@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271+
4272+#ifdef __KERNEL__
4273+#ifndef __ASSEMBLY__
4274+#include <linux/types.h>
4275+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276+#endif
4277+#endif
4278+
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
4282diff -urNp linux-2.6.32.48/arch/sparc/kernel/iommu.c linux-2.6.32.48/arch/sparc/kernel/iommu.c
4283--- linux-2.6.32.48/arch/sparc/kernel/iommu.c 2011-11-08 19:02:43.000000000 -0500
4284+++ linux-2.6.32.48/arch/sparc/kernel/iommu.c 2011-11-15 19:59:42.000000000 -0500
4285@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289-static struct dma_map_ops sun4u_dma_ops = {
4290+static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303diff -urNp linux-2.6.32.48/arch/sparc/kernel/ioport.c linux-2.6.32.48/arch/sparc/kernel/ioport.c
4304--- linux-2.6.32.48/arch/sparc/kernel/ioport.c 2011-11-08 19:02:43.000000000 -0500
4305+++ linux-2.6.32.48/arch/sparc/kernel/ioport.c 2011-11-15 19:59:42.000000000 -0500
4306@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310-struct dma_map_ops sbus_dma_ops = {
4311+const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328-struct dma_map_ops pci32_dma_ops = {
4329+const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
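The dma_map_ops hunks in the two files above (and in pci_sun4v.c below) only add const qualifiers: the method tables and the dma_ops pointer become read-only, so the compiler places the function-pointer tables in .rodata and they cannot be redirected at run time. A minimal C illustration of the idea, with made-up names:

	struct ops {
		void (*handler)(void);
	};

	static void real_handler(void) { }

	/* const table with constant initializers: emitted into .rodata */
	static const struct ops my_ops = {
		.handler = real_handler,
	};

	/* pointer to const, matching "const struct dma_map_ops *dma_ops = &sun4u_dma_ops;" */
	const struct ops *my_ops_ptr = &my_ops;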
4333diff -urNp linux-2.6.32.48/arch/sparc/kernel/kgdb_32.c linux-2.6.32.48/arch/sparc/kernel/kgdb_32.c
4334--- linux-2.6.32.48/arch/sparc/kernel/kgdb_32.c 2011-11-08 19:02:43.000000000 -0500
4335+++ linux-2.6.32.48/arch/sparc/kernel/kgdb_32.c 2011-11-15 19:59:42.000000000 -0500
4336@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340-struct kgdb_arch arch_kgdb_ops = {
4341+const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345diff -urNp linux-2.6.32.48/arch/sparc/kernel/kgdb_64.c linux-2.6.32.48/arch/sparc/kernel/kgdb_64.c
4346--- linux-2.6.32.48/arch/sparc/kernel/kgdb_64.c 2011-11-08 19:02:43.000000000 -0500
4347+++ linux-2.6.32.48/arch/sparc/kernel/kgdb_64.c 2011-11-15 19:59:42.000000000 -0500
4348@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352-struct kgdb_arch arch_kgdb_ops = {
4353+const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
4357diff -urNp linux-2.6.32.48/arch/sparc/kernel/Makefile linux-2.6.32.48/arch/sparc/kernel/Makefile
4358--- linux-2.6.32.48/arch/sparc/kernel/Makefile 2011-11-08 19:02:43.000000000 -0500
4359+++ linux-2.6.32.48/arch/sparc/kernel/Makefile 2011-11-15 19:59:42.000000000 -0500
4360@@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364-ccflags-y := -Werror
4365+#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369diff -urNp linux-2.6.32.48/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.48/arch/sparc/kernel/pci_sun4v.c
4370--- linux-2.6.32.48/arch/sparc/kernel/pci_sun4v.c 2011-11-08 19:02:43.000000000 -0500
4371+++ linux-2.6.32.48/arch/sparc/kernel/pci_sun4v.c 2011-11-15 19:59:42.000000000 -0500
4372@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376-static struct dma_map_ops sun4v_dma_ops = {
4377+static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381diff -urNp linux-2.6.32.48/arch/sparc/kernel/process_32.c linux-2.6.32.48/arch/sparc/kernel/process_32.c
4382--- linux-2.6.32.48/arch/sparc/kernel/process_32.c 2011-11-08 19:02:43.000000000 -0500
4383+++ linux-2.6.32.48/arch/sparc/kernel/process_32.c 2011-11-15 19:59:42.000000000 -0500
4384@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388- printk("%pS\n", (void *) rw->ins[7]);
4389+ printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397- printk("PC: <%pS>\n", (void *) r->pc);
4398+ printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414- printk("%pS ] ", (void *) pc);
4415+ printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419diff -urNp linux-2.6.32.48/arch/sparc/kernel/process_64.c linux-2.6.32.48/arch/sparc/kernel/process_64.c
4420--- linux-2.6.32.48/arch/sparc/kernel/process_64.c 2011-11-08 19:02:43.000000000 -0500
4421+++ linux-2.6.32.48/arch/sparc/kernel/process_64.c 2011-11-15 19:59:42.000000000 -0500
4422@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434- printk("TPC: <%pS>\n", (void *) regs->tpc);
4435+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457diff -urNp linux-2.6.32.48/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.48/arch/sparc/kernel/sys_sparc_32.c
4458--- linux-2.6.32.48/arch/sparc/kernel/sys_sparc_32.c 2011-11-08 19:02:43.000000000 -0500
4459+++ linux-2.6.32.48/arch/sparc/kernel/sys_sparc_32.c 2011-11-15 19:59:42.000000000 -0500
4460@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464- addr = TASK_UNMAPPED_BASE;
4465+ addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473- if (!vmm || addr + len <= vmm->vm_start)
4474+ if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478diff -urNp linux-2.6.32.48/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.48/arch/sparc/kernel/sys_sparc_64.c
4479--- linux-2.6.32.48/arch/sparc/kernel/sys_sparc_64.c 2011-11-08 19:02:43.000000000 -0500
4480+++ linux-2.6.32.48/arch/sparc/kernel/sys_sparc_64.c 2011-11-15 19:59:42.000000000 -0500
4481@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485- if ((flags & MAP_SHARED) &&
4486+ if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494+#ifdef CONFIG_PAX_RANDMMAP
4495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496+#endif
4497+
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505- if (task_size - len >= addr &&
4506- (!vma || addr + len <= vma->vm_start))
4507+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512- start_addr = addr = mm->free_area_cache;
4513+ start_addr = addr = mm->free_area_cache;
4514 } else {
4515- start_addr = addr = TASK_UNMAPPED_BASE;
4516+ start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520@@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524- if (start_addr != TASK_UNMAPPED_BASE) {
4525- start_addr = addr = TASK_UNMAPPED_BASE;
4526+ if (start_addr != mm->mmap_base) {
4527+ start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533- if (likely(!vma || addr + len <= vma->vm_start)) {
4534+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542- if ((flags & MAP_SHARED) &&
4543+ if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551- if (task_size - len >= addr &&
4552- (!vma || addr + len <= vma->vm_start))
4553+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561- if (!vma || addr <= vma->vm_start) {
4562+ if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570- addr = mm->mmap_base-len;
4571- if (do_color_align)
4572- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573+ addr = mm->mmap_base - len;
4574
4575 do {
4576+ if (do_color_align)
4577+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584- if (likely(!vma || addr+len <= vma->vm_start)) {
4585+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593- addr = vma->vm_start-len;
4594- if (do_color_align)
4595- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596- } while (likely(len < vma->vm_start));
4597+ addr = skip_heap_stack_gap(vma, len);
4598+ } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606+
4607+#ifdef CONFIG_PAX_RANDMMAP
4608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4609+ mm->mmap_base += mm->delta_mmap;
4610+#endif
4611+
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619+
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623+#endif
4624+
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
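In the sys_sparc_64.c hunks above, the raw "!vma || addr + len <= vma->vm_start" tests become check_heap_stack_gap()/skip_heap_stack_gap(), and the search base switches from TASK_UNMAPPED_BASE to mm->mmap_base, which the PAX_RANDMMAP hunks shift by delta_mmap/delta_stack. The gap helpers are defined elsewhere in this patch; the sketch below is only a rough conceptual model of their behaviour (keeping a guard gap below growing-down stack mappings), and sysctl_heap_stack_gap is an assumed tunable name, not necessarily the patch's:

	#include <linux/mm.h>

	/* assumed tunable, in pages */
	extern unsigned long sysctl_heap_stack_gap;

	static inline int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
						      unsigned long addr, unsigned long len)
	{
		if (!vma)
			return 1;			/* nothing above: the range fits */
		if (addr + len > vma->vm_start)
			return 0;			/* overlaps the next vma */
		if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stack-like vmas */
			return addr + len + (sysctl_heap_stack_gap << PAGE_SHIFT) <= vma->vm_start;
		return 1;
	}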
4628diff -urNp linux-2.6.32.48/arch/sparc/kernel/traps_32.c linux-2.6.32.48/arch/sparc/kernel/traps_32.c
4629--- linux-2.6.32.48/arch/sparc/kernel/traps_32.c 2011-11-08 19:02:43.000000000 -0500
4630+++ linux-2.6.32.48/arch/sparc/kernel/traps_32.c 2011-11-15 19:59:42.000000000 -0500
4631@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635+extern void gr_handle_kernel_exploit(void);
4636+
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652- if(regs->psr & PSR_PS)
4653+ if(regs->psr & PSR_PS) {
4654+ gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656+ }
4657 do_exit(SIGSEGV);
4658 }
4659
4660diff -urNp linux-2.6.32.48/arch/sparc/kernel/traps_64.c linux-2.6.32.48/arch/sparc/kernel/traps_64.c
4661--- linux-2.6.32.48/arch/sparc/kernel/traps_64.c 2011-11-08 19:02:43.000000000 -0500
4662+++ linux-2.6.32.48/arch/sparc/kernel/traps_64.c 2011-11-15 19:59:42.000000000 -0500
4663@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676+
4677+#ifdef CONFIG_PAX_REFCOUNT
4678+ if (lvl == 6)
4679+ pax_report_refcount_overflow(regs);
4680+#endif
4681+
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689-
4690+
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695+#ifdef CONFIG_PAX_REFCOUNT
4696+ if (lvl == 6)
4697+ pax_report_refcount_overflow(regs);
4698+#endif
4699+
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707- printk("TPC<%pS>\n", (void *) regs->tpc);
4708+ printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758- printk(" [%016lx] %pS\n", pc, (void *) pc);
4759+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767+extern void gr_handle_kernel_exploit(void);
4768+
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785- if (regs->tstate & TSTATE_PRIV)
4786+ if (regs->tstate & TSTATE_PRIV) {
4787+ gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789+ }
4790+
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794diff -urNp linux-2.6.32.48/arch/sparc/kernel/una_asm_64.S linux-2.6.32.48/arch/sparc/kernel/una_asm_64.S
4795--- linux-2.6.32.48/arch/sparc/kernel/una_asm_64.S 2011-11-08 19:02:43.000000000 -0500
4796+++ linux-2.6.32.48/arch/sparc/kernel/una_asm_64.S 2011-11-15 19:59:42.000000000 -0500
4797@@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801- .size __do_int_load, .-__do_int_load
4802+ .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806diff -urNp linux-2.6.32.48/arch/sparc/kernel/unaligned_64.c linux-2.6.32.48/arch/sparc/kernel/unaligned_64.c
4807--- linux-2.6.32.48/arch/sparc/kernel/unaligned_64.c 2011-11-08 19:02:43.000000000 -0500
4808+++ linux-2.6.32.48/arch/sparc/kernel/unaligned_64.c 2011-11-15 19:59:42.000000000 -0500
4809@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818diff -urNp linux-2.6.32.48/arch/sparc/lib/atomic_64.S linux-2.6.32.48/arch/sparc/lib/atomic_64.S
4819--- linux-2.6.32.48/arch/sparc/lib/atomic_64.S 2011-11-08 19:02:43.000000000 -0500
4820+++ linux-2.6.32.48/arch/sparc/lib/atomic_64.S 2011-11-15 19:59:42.000000000 -0500
4821@@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825- add %g1, %o0, %g7
4826+ addcc %g1, %o0, %g7
4827+
4828+#ifdef CONFIG_PAX_REFCOUNT
4829+ tvs %icc, 6
4830+#endif
4831+
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839+ .globl atomic_add_unchecked
4840+ .type atomic_add_unchecked,#function
4841+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842+ BACKOFF_SETUP(%o2)
4843+1: lduw [%o1], %g1
4844+ add %g1, %o0, %g7
4845+ cas [%o1], %g1, %g7
4846+ cmp %g1, %g7
4847+ bne,pn %icc, 2f
4848+ nop
4849+ retl
4850+ nop
4851+2: BACKOFF_SPIN(%o2, %o3, 1b)
4852+ .size atomic_add_unchecked, .-atomic_add_unchecked
4853+
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859- sub %g1, %o0, %g7
4860+ subcc %g1, %o0, %g7
4861+
4862+#ifdef CONFIG_PAX_REFCOUNT
4863+ tvs %icc, 6
4864+#endif
4865+
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873+ .globl atomic_sub_unchecked
4874+ .type atomic_sub_unchecked,#function
4875+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876+ BACKOFF_SETUP(%o2)
4877+1: lduw [%o1], %g1
4878+ sub %g1, %o0, %g7
4879+ cas [%o1], %g1, %g7
4880+ cmp %g1, %g7
4881+ bne,pn %icc, 2f
4882+ nop
4883+ retl
4884+ nop
4885+2: BACKOFF_SPIN(%o2, %o3, 1b)
4886+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887+
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893- add %g1, %o0, %g7
4894+ addcc %g1, %o0, %g7
4895+
4896+#ifdef CONFIG_PAX_REFCOUNT
4897+ tvs %icc, 6
4898+#endif
4899+
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907+ .globl atomic_add_ret_unchecked
4908+ .type atomic_add_ret_unchecked,#function
4909+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910+ BACKOFF_SETUP(%o2)
4911+1: lduw [%o1], %g1
4912+ addcc %g1, %o0, %g7
4913+ cas [%o1], %g1, %g7
4914+ cmp %g1, %g7
4915+ bne,pn %icc, 2f
4916+ add %g7, %o0, %g7
4917+ sra %g7, 0, %o0
4918+ retl
4919+ nop
4920+2: BACKOFF_SPIN(%o2, %o3, 1b)
4921+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922+
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928- sub %g1, %o0, %g7
4929+ subcc %g1, %o0, %g7
4930+
4931+#ifdef CONFIG_PAX_REFCOUNT
4932+ tvs %icc, 6
4933+#endif
4934+
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942- add %g1, %o0, %g7
4943+ addcc %g1, %o0, %g7
4944+
4945+#ifdef CONFIG_PAX_REFCOUNT
4946+ tvs %xcc, 6
4947+#endif
4948+
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956+ .globl atomic64_add_unchecked
4957+ .type atomic64_add_unchecked,#function
4958+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959+ BACKOFF_SETUP(%o2)
4960+1: ldx [%o1], %g1
4961+ addcc %g1, %o0, %g7
4962+ casx [%o1], %g1, %g7
4963+ cmp %g1, %g7
4964+ bne,pn %xcc, 2f
4965+ nop
4966+ retl
4967+ nop
4968+2: BACKOFF_SPIN(%o2, %o3, 1b)
4969+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970+
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976- sub %g1, %o0, %g7
4977+ subcc %g1, %o0, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %xcc, 6
4981+#endif
4982+
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990+ .globl atomic64_sub_unchecked
4991+ .type atomic64_sub_unchecked,#function
4992+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993+ BACKOFF_SETUP(%o2)
4994+1: ldx [%o1], %g1
4995+ subcc %g1, %o0, %g7
4996+ casx [%o1], %g1, %g7
4997+ cmp %g1, %g7
4998+ bne,pn %xcc, 2f
4999+ nop
5000+ retl
5001+ nop
5002+2: BACKOFF_SPIN(%o2, %o3, 1b)
5003+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004+
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010- add %g1, %o0, %g7
5011+ addcc %g1, %o0, %g7
5012+
5013+#ifdef CONFIG_PAX_REFCOUNT
5014+ tvs %xcc, 6
5015+#endif
5016+
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024+ .globl atomic64_add_ret_unchecked
5025+ .type atomic64_add_ret_unchecked,#function
5026+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027+ BACKOFF_SETUP(%o2)
5028+1: ldx [%o1], %g1
5029+ addcc %g1, %o0, %g7
5030+ casx [%o1], %g1, %g7
5031+ cmp %g1, %g7
5032+ bne,pn %xcc, 2f
5033+ add %g7, %o0, %g7
5034+ mov %g7, %o0
5035+ retl
5036+ nop
5037+2: BACKOFF_SPIN(%o2, %o3, 1b)
5038+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039+
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045- sub %g1, %o0, %g7
5046+ subcc %g1, %o0, %g7
5047+
5048+#ifdef CONFIG_PAX_REFCOUNT
5049+ tvs %xcc, 6
5050+#endif
5051+
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
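The atomic_64.S changes above all follow one pattern: the plain add/sub in each cas loop becomes addcc/subcc so the condition codes are set, and under CONFIG_PAX_REFCOUNT a "tvs %icc, 6" (or %xcc for the 64-bit variants) traps into software trap 6 on signed overflow, which the traps_64.c hunks earlier route to pax_report_refcount_overflow(). The *_unchecked variants keep the original wrapping arithmetic for counters that may legitimately overflow. The C program below is only an analogy for the checked-versus-unchecked split; the kernel uses the hardware trap, not __builtin_add_overflow:

	#include <stdio.h>

	/* checked add: detect signed overflow and refuse to store */
	static int atomic_add_checked(int *v, int inc)
	{
		int sum;
		if (__builtin_add_overflow(*v, inc, &sum)) {
			/* kernel path: trap 6 -> pax_report_refcount_overflow(regs) */
			fprintf(stderr, "refcount overflow detected\n");
			return -1;
		}
		*v = sum;
		return 0;
	}

	/* unchecked add: wraps silently, like atomic_add_unchecked above */
	static void atomic_add_unchecked_sketch(int *v, int inc)
	{
		*v = (int)((unsigned int)*v + (unsigned int)inc);	/* well-defined wrap */
	}

	int main(void)
	{
		int v = 0x7fffffff;
		if (atomic_add_checked(&v, 1))
			atomic_add_unchecked_sketch(&v, 1);
		printf("v = %d\n", v);
		return 0;
	}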
5055diff -urNp linux-2.6.32.48/arch/sparc/lib/ksyms.c linux-2.6.32.48/arch/sparc/lib/ksyms.c
5056--- linux-2.6.32.48/arch/sparc/lib/ksyms.c 2011-11-08 19:02:43.000000000 -0500
5057+++ linux-2.6.32.48/arch/sparc/lib/ksyms.c 2011-11-15 19:59:42.000000000 -0500
5058@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062+EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066+EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069+EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073+EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077diff -urNp linux-2.6.32.48/arch/sparc/lib/Makefile linux-2.6.32.48/arch/sparc/lib/Makefile
5078--- linux-2.6.32.48/arch/sparc/lib/Makefile 2011-11-08 19:02:43.000000000 -0500
5079+++ linux-2.6.32.48/arch/sparc/lib/Makefile 2011-11-15 19:59:42.000000000 -0500
5080@@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084-ccflags-y := -Werror
5085+#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089diff -urNp linux-2.6.32.48/arch/sparc/lib/rwsem_64.S linux-2.6.32.48/arch/sparc/lib/rwsem_64.S
5090--- linux-2.6.32.48/arch/sparc/lib/rwsem_64.S 2011-11-08 19:02:43.000000000 -0500
5091+++ linux-2.6.32.48/arch/sparc/lib/rwsem_64.S 2011-11-15 19:59:42.000000000 -0500
5092@@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096- add %g1, 1, %g7
5097+ addcc %g1, 1, %g7
5098+
5099+#ifdef CONFIG_PAX_REFCOUNT
5100+ tvs %icc, 6
5101+#endif
5102+
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106@@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110- add %g1, 1, %g7
5111+ addcc %g1, 1, %g7
5112+
5113+#ifdef CONFIG_PAX_REFCOUNT
5114+ tvs %icc, 6
5115+#endif
5116+
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120@@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124- add %g3, %g1, %g7
5125+ addcc %g3, %g1, %g7
5126+
5127+#ifdef CONFIG_PAX_REFCOUNT
5128+ tvs %icc, 6
5129+#endif
5130+
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134@@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138- add %g3, %g1, %g7
5139+ addcc %g3, %g1, %g7
5140+
5141+#ifdef CONFIG_PAX_REFCOUNT
5142+ tvs %icc, 6
5143+#endif
5144+
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148@@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152- sub %g1, 1, %g7
5153+ subcc %g1, 1, %g7
5154+
5155+#ifdef CONFIG_PAX_REFCOUNT
5156+ tvs %icc, 6
5157+#endif
5158+
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162@@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166- sub %g3, %g1, %g7
5167+ subcc %g3, %g1, %g7
5168+
5169+#ifdef CONFIG_PAX_REFCOUNT
5170+ tvs %icc, 6
5171+#endif
5172+
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176@@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180- sub %g3, %g1, %g7
5181+ subcc %g3, %g1, %g7
5182+
5183+#ifdef CONFIG_PAX_REFCOUNT
5184+ tvs %icc, 6
5185+#endif
5186+
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190diff -urNp linux-2.6.32.48/arch/sparc/Makefile linux-2.6.32.48/arch/sparc/Makefile
5191--- linux-2.6.32.48/arch/sparc/Makefile 2011-11-08 19:02:43.000000000 -0500
5192+++ linux-2.6.32.48/arch/sparc/Makefile 2011-11-15 19:59:42.000000000 -0500
5193@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202diff -urNp linux-2.6.32.48/arch/sparc/mm/fault_32.c linux-2.6.32.48/arch/sparc/mm/fault_32.c
5203--- linux-2.6.32.48/arch/sparc/mm/fault_32.c 2011-11-08 19:02:43.000000000 -0500
5204+++ linux-2.6.32.48/arch/sparc/mm/fault_32.c 2011-11-15 19:59:42.000000000 -0500
5205@@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209+#include <linux/slab.h>
5210+#include <linux/pagemap.h>
5211+#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219+#ifdef CONFIG_PAX_PAGEEXEC
5220+#ifdef CONFIG_PAX_DLRESOLVE
5221+static void pax_emuplt_close(struct vm_area_struct *vma)
5222+{
5223+ vma->vm_mm->call_dl_resolve = 0UL;
5224+}
5225+
5226+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227+{
5228+ unsigned int *kaddr;
5229+
5230+ vmf->page = alloc_page(GFP_HIGHUSER);
5231+ if (!vmf->page)
5232+ return VM_FAULT_OOM;
5233+
5234+ kaddr = kmap(vmf->page);
5235+ memset(kaddr, 0, PAGE_SIZE);
5236+ kaddr[0] = 0x9DE3BFA8U; /* save */
5237+ flush_dcache_page(vmf->page);
5238+ kunmap(vmf->page);
5239+ return VM_FAULT_MAJOR;
5240+}
5241+
5242+static const struct vm_operations_struct pax_vm_ops = {
5243+ .close = pax_emuplt_close,
5244+ .fault = pax_emuplt_fault
5245+};
5246+
5247+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248+{
5249+ int ret;
5250+
5251+ vma->vm_mm = current->mm;
5252+ vma->vm_start = addr;
5253+ vma->vm_end = addr + PAGE_SIZE;
5254+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256+ vma->vm_ops = &pax_vm_ops;
5257+
5258+ ret = insert_vm_struct(current->mm, vma);
5259+ if (ret)
5260+ return ret;
5261+
5262+ ++current->mm->total_vm;
5263+ return 0;
5264+}
5265+#endif
5266+
5267+/*
5268+ * PaX: decide what to do with offenders (regs->pc = fault address)
5269+ *
5270+ * returns 1 when task should be killed
5271+ * 2 when patched PLT trampoline was detected
5272+ * 3 when unpatched PLT trampoline was detected
5273+ */
5274+static int pax_handle_fetch_fault(struct pt_regs *regs)
5275+{
5276+
5277+#ifdef CONFIG_PAX_EMUPLT
5278+ int err;
5279+
5280+ do { /* PaX: patched PLT emulation #1 */
5281+ unsigned int sethi1, sethi2, jmpl;
5282+
5283+ err = get_user(sethi1, (unsigned int *)regs->pc);
5284+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286+
5287+ if (err)
5288+ break;
5289+
5290+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293+ {
5294+ unsigned int addr;
5295+
5296+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297+ addr = regs->u_regs[UREG_G1];
5298+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299+ regs->pc = addr;
5300+ regs->npc = addr+4;
5301+ return 2;
5302+ }
5303+ } while (0);
5304+
5305+ { /* PaX: patched PLT emulation #2 */
5306+ unsigned int ba;
5307+
5308+ err = get_user(ba, (unsigned int *)regs->pc);
5309+
5310+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311+ unsigned int addr;
5312+
5313+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314+ regs->pc = addr;
5315+ regs->npc = addr+4;
5316+ return 2;
5317+ }
5318+ }
5319+
5320+ do { /* PaX: patched PLT emulation #3 */
5321+ unsigned int sethi, jmpl, nop;
5322+
5323+ err = get_user(sethi, (unsigned int *)regs->pc);
5324+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326+
5327+ if (err)
5328+ break;
5329+
5330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332+ nop == 0x01000000U)
5333+ {
5334+ unsigned int addr;
5335+
5336+ addr = (sethi & 0x003FFFFFU) << 10;
5337+ regs->u_regs[UREG_G1] = addr;
5338+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339+ regs->pc = addr;
5340+ regs->npc = addr+4;
5341+ return 2;
5342+ }
5343+ } while (0);
5344+
5345+ do { /* PaX: unpatched PLT emulation step 1 */
5346+ unsigned int sethi, ba, nop;
5347+
5348+ err = get_user(sethi, (unsigned int *)regs->pc);
5349+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351+
5352+ if (err)
5353+ break;
5354+
5355+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357+ nop == 0x01000000U)
5358+ {
5359+ unsigned int addr, save, call;
5360+
5361+ if ((ba & 0xFFC00000U) == 0x30800000U)
5362+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363+ else
5364+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365+
5366+ err = get_user(save, (unsigned int *)addr);
5367+ err |= get_user(call, (unsigned int *)(addr+4));
5368+ err |= get_user(nop, (unsigned int *)(addr+8));
5369+ if (err)
5370+ break;
5371+
5372+#ifdef CONFIG_PAX_DLRESOLVE
5373+ if (save == 0x9DE3BFA8U &&
5374+ (call & 0xC0000000U) == 0x40000000U &&
5375+ nop == 0x01000000U)
5376+ {
5377+ struct vm_area_struct *vma;
5378+ unsigned long call_dl_resolve;
5379+
5380+ down_read(&current->mm->mmap_sem);
5381+ call_dl_resolve = current->mm->call_dl_resolve;
5382+ up_read(&current->mm->mmap_sem);
5383+ if (likely(call_dl_resolve))
5384+ goto emulate;
5385+
5386+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387+
5388+ down_write(&current->mm->mmap_sem);
5389+ if (current->mm->call_dl_resolve) {
5390+ call_dl_resolve = current->mm->call_dl_resolve;
5391+ up_write(&current->mm->mmap_sem);
5392+ if (vma)
5393+ kmem_cache_free(vm_area_cachep, vma);
5394+ goto emulate;
5395+ }
5396+
5397+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399+ up_write(&current->mm->mmap_sem);
5400+ if (vma)
5401+ kmem_cache_free(vm_area_cachep, vma);
5402+ return 1;
5403+ }
5404+
5405+ if (pax_insert_vma(vma, call_dl_resolve)) {
5406+ up_write(&current->mm->mmap_sem);
5407+ kmem_cache_free(vm_area_cachep, vma);
5408+ return 1;
5409+ }
5410+
5411+ current->mm->call_dl_resolve = call_dl_resolve;
5412+ up_write(&current->mm->mmap_sem);
5413+
5414+emulate:
5415+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416+ regs->pc = call_dl_resolve;
5417+ regs->npc = addr+4;
5418+ return 3;
5419+ }
5420+#endif
5421+
5422+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423+ if ((save & 0xFFC00000U) == 0x05000000U &&
5424+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5425+ nop == 0x01000000U)
5426+ {
5427+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428+ regs->u_regs[UREG_G2] = addr + 4;
5429+ addr = (save & 0x003FFFFFU) << 10;
5430+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431+ regs->pc = addr;
5432+ regs->npc = addr+4;
5433+ return 3;
5434+ }
5435+ }
5436+ } while (0);
5437+
5438+ do { /* PaX: unpatched PLT emulation step 2 */
5439+ unsigned int save, call, nop;
5440+
5441+ err = get_user(save, (unsigned int *)(regs->pc-4));
5442+ err |= get_user(call, (unsigned int *)regs->pc);
5443+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444+ if (err)
5445+ break;
5446+
5447+ if (save == 0x9DE3BFA8U &&
5448+ (call & 0xC0000000U) == 0x40000000U &&
5449+ nop == 0x01000000U)
5450+ {
5451+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452+
5453+ regs->u_regs[UREG_RETPC] = regs->pc;
5454+ regs->pc = dl_resolve;
5455+ regs->npc = dl_resolve+4;
5456+ return 3;
5457+ }
5458+ } while (0);
5459+#endif
5460+
5461+ return 1;
5462+}
5463+
5464+void pax_report_insns(void *pc, void *sp)
5465+{
5466+ unsigned long i;
5467+
5468+ printk(KERN_ERR "PAX: bytes at PC: ");
5469+ for (i = 0; i < 8; i++) {
5470+ unsigned int c;
5471+ if (get_user(c, (unsigned int *)pc+i))
5472+ printk(KERN_CONT "???????? ");
5473+ else
5474+ printk(KERN_CONT "%08x ", c);
5475+ }
5476+ printk("\n");
5477+}
5478+#endif
5479+
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483@@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487+
5488+#ifdef CONFIG_PAX_PAGEEXEC
5489+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490+ up_read(&mm->mmap_sem);
5491+ switch (pax_handle_fetch_fault(regs)) {
5492+
5493+#ifdef CONFIG_PAX_EMUPLT
5494+ case 2:
5495+ case 3:
5496+ return;
5497+#endif
5498+
5499+ }
5500+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501+ do_group_exit(SIGKILL);
5502+ }
5503+#endif
5504+
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
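The fault_32.c additions above implement PAGEEXEC with PLT emulation: when an instruction fetch faults on a non-executable page, pax_handle_fetch_fault() reads the faulting instructions and, if they form a known PLT trampoline (the patched sethi/jmpl forms, a bare branch, or the unpatched save/call form), emulates the control transfer instead of killing the task. The address arithmetic depends on SPARC instruction encodings; the stand-alone program below uses the same masks as patched PLT emulation #3 above to decode a sethi/jmpl pair back into its 32-bit target. The operand values are made-up examples:

	#include <stdio.h>

	/* Decode "sethi %hi(X), %g1 ; jmpl %g1 + %lo(X), %g0" into X */
	static unsigned int plt_target(unsigned int sethi, unsigned int jmpl)
	{
		unsigned int addr = (sethi & 0x003FFFFFU) << 10;		/* imm22 -> %hi() */
		/* sign-extend the 13-bit jmpl immediate and add it */
		addr += ((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
		return addr;
	}

	int main(void)
	{
		unsigned int expected = 0x12345400U + 0x2A8U;			/* example only */
		unsigned int sethi = 0x03000000U | (0x12345400U >> 10);	/* sethi to %g1 */
		unsigned int jmpl  = 0x81C06000U | 0x2A8U;			/* jmpl %g1+0x2a8, %g0 */
		printf("decoded 0x%08x, expected 0x%08x\n", plt_target(sethi, jmpl), expected);
		return 0;
	}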
5508diff -urNp linux-2.6.32.48/arch/sparc/mm/fault_64.c linux-2.6.32.48/arch/sparc/mm/fault_64.c
5509--- linux-2.6.32.48/arch/sparc/mm/fault_64.c 2011-11-08 19:02:43.000000000 -0500
5510+++ linux-2.6.32.48/arch/sparc/mm/fault_64.c 2011-11-15 19:59:42.000000000 -0500
5511@@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515+#include <linux/slab.h>
5516+#include <linux/pagemap.h>
5517+#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534+#ifdef CONFIG_PAX_PAGEEXEC
5535+#ifdef CONFIG_PAX_DLRESOLVE
5536+static void pax_emuplt_close(struct vm_area_struct *vma)
5537+{
5538+ vma->vm_mm->call_dl_resolve = 0UL;
5539+}
5540+
5541+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542+{
5543+ unsigned int *kaddr;
5544+
5545+ vmf->page = alloc_page(GFP_HIGHUSER);
5546+ if (!vmf->page)
5547+ return VM_FAULT_OOM;
5548+
5549+ kaddr = kmap(vmf->page);
5550+ memset(kaddr, 0, PAGE_SIZE);
5551+ kaddr[0] = 0x9DE3BFA8U; /* save */
5552+ flush_dcache_page(vmf->page);
5553+ kunmap(vmf->page);
5554+ return VM_FAULT_MAJOR;
5555+}
5556+
5557+static const struct vm_operations_struct pax_vm_ops = {
5558+ .close = pax_emuplt_close,
5559+ .fault = pax_emuplt_fault
5560+};
5561+
5562+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563+{
5564+ int ret;
5565+
5566+ vma->vm_mm = current->mm;
5567+ vma->vm_start = addr;
5568+ vma->vm_end = addr + PAGE_SIZE;
5569+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571+ vma->vm_ops = &pax_vm_ops;
5572+
5573+ ret = insert_vm_struct(current->mm, vma);
5574+ if (ret)
5575+ return ret;
5576+
5577+ ++current->mm->total_vm;
5578+ return 0;
5579+}
5580+#endif
5581+
5582+/*
5583+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5584+ *
5585+ * returns 1 when task should be killed
5586+ * 2 when patched PLT trampoline was detected
5587+ * 3 when unpatched PLT trampoline was detected
5588+ */
5589+static int pax_handle_fetch_fault(struct pt_regs *regs)
5590+{
5591+
5592+#ifdef CONFIG_PAX_EMUPLT
5593+ int err;
5594+
5595+ do { /* PaX: patched PLT emulation #1 */
5596+ unsigned int sethi1, sethi2, jmpl;
5597+
5598+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5599+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601+
5602+ if (err)
5603+ break;
5604+
5605+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608+ {
5609+ unsigned long addr;
5610+
5611+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612+ addr = regs->u_regs[UREG_G1];
5613+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614+
5615+ if (test_thread_flag(TIF_32BIT))
5616+ addr &= 0xFFFFFFFFUL;
5617+
5618+ regs->tpc = addr;
5619+ regs->tnpc = addr+4;
5620+ return 2;
5621+ }
5622+ } while (0);
5623+
5624+ { /* PaX: patched PLT emulation #2 */
5625+ unsigned int ba;
5626+
5627+ err = get_user(ba, (unsigned int *)regs->tpc);
5628+
5629+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630+ unsigned long addr;
5631+
5632+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633+
5634+ if (test_thread_flag(TIF_32BIT))
5635+ addr &= 0xFFFFFFFFUL;
5636+
5637+ regs->tpc = addr;
5638+ regs->tnpc = addr+4;
5639+ return 2;
5640+ }
5641+ }
5642+
5643+ do { /* PaX: patched PLT emulation #3 */
5644+ unsigned int sethi, jmpl, nop;
5645+
5646+ err = get_user(sethi, (unsigned int *)regs->tpc);
5647+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649+
5650+ if (err)
5651+ break;
5652+
5653+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655+ nop == 0x01000000U)
5656+ {
5657+ unsigned long addr;
5658+
5659+ addr = (sethi & 0x003FFFFFU) << 10;
5660+ regs->u_regs[UREG_G1] = addr;
5661+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662+
5663+ if (test_thread_flag(TIF_32BIT))
5664+ addr &= 0xFFFFFFFFUL;
5665+
5666+ regs->tpc = addr;
5667+ regs->tnpc = addr+4;
5668+ return 2;
5669+ }
5670+ } while (0);
5671+
5672+ do { /* PaX: patched PLT emulation #4 */
5673+ unsigned int sethi, mov1, call, mov2;
5674+
5675+ err = get_user(sethi, (unsigned int *)regs->tpc);
5676+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679+
5680+ if (err)
5681+ break;
5682+
5683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684+ mov1 == 0x8210000FU &&
5685+ (call & 0xC0000000U) == 0x40000000U &&
5686+ mov2 == 0x9E100001U)
5687+ {
5688+ unsigned long addr;
5689+
5690+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692+
5693+ if (test_thread_flag(TIF_32BIT))
5694+ addr &= 0xFFFFFFFFUL;
5695+
5696+ regs->tpc = addr;
5697+ regs->tnpc = addr+4;
5698+ return 2;
5699+ }
5700+ } while (0);
5701+
5702+ do { /* PaX: patched PLT emulation #5 */
5703+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704+
5705+ err = get_user(sethi, (unsigned int *)regs->tpc);
5706+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713+
5714+ if (err)
5715+ break;
5716+
5717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5721+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722+ sllx == 0x83287020U &&
5723+ jmpl == 0x81C04005U &&
5724+ nop == 0x01000000U)
5725+ {
5726+ unsigned long addr;
5727+
5728+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729+ regs->u_regs[UREG_G1] <<= 32;
5730+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732+ regs->tpc = addr;
5733+ regs->tnpc = addr+4;
5734+ return 2;
5735+ }
5736+ } while (0);
5737+
5738+ do { /* PaX: patched PLT emulation #6 */
5739+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740+
5741+ err = get_user(sethi, (unsigned int *)regs->tpc);
5742+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748+
5749+ if (err)
5750+ break;
5751+
5752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755+ sllx == 0x83287020U &&
5756+ (or & 0xFFFFE000U) == 0x8A116000U &&
5757+ jmpl == 0x81C04005U &&
5758+ nop == 0x01000000U)
5759+ {
5760+ unsigned long addr;
5761+
5762+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763+ regs->u_regs[UREG_G1] <<= 32;
5764+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766+ regs->tpc = addr;
5767+ regs->tnpc = addr+4;
5768+ return 2;
5769+ }
5770+ } while (0);
5771+
5772+ do { /* PaX: unpatched PLT emulation step 1 */
5773+ unsigned int sethi, ba, nop;
5774+
5775+ err = get_user(sethi, (unsigned int *)regs->tpc);
5776+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778+
5779+ if (err)
5780+ break;
5781+
5782+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784+ nop == 0x01000000U)
5785+ {
5786+ unsigned long addr;
5787+ unsigned int save, call;
5788+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789+
5790+ if ((ba & 0xFFC00000U) == 0x30800000U)
5791+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792+ else
5793+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794+
5795+ if (test_thread_flag(TIF_32BIT))
5796+ addr &= 0xFFFFFFFFUL;
5797+
5798+ err = get_user(save, (unsigned int *)addr);
5799+ err |= get_user(call, (unsigned int *)(addr+4));
5800+ err |= get_user(nop, (unsigned int *)(addr+8));
5801+ if (err)
5802+ break;
5803+
5804+#ifdef CONFIG_PAX_DLRESOLVE
5805+ if (save == 0x9DE3BFA8U &&
5806+ (call & 0xC0000000U) == 0x40000000U &&
5807+ nop == 0x01000000U)
5808+ {
5809+ struct vm_area_struct *vma;
5810+ unsigned long call_dl_resolve;
5811+
5812+ down_read(&current->mm->mmap_sem);
5813+ call_dl_resolve = current->mm->call_dl_resolve;
5814+ up_read(&current->mm->mmap_sem);
5815+ if (likely(call_dl_resolve))
5816+ goto emulate;
5817+
5818+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819+
5820+ down_write(&current->mm->mmap_sem);
5821+ if (current->mm->call_dl_resolve) {
5822+ call_dl_resolve = current->mm->call_dl_resolve;
5823+ up_write(&current->mm->mmap_sem);
5824+ if (vma)
5825+ kmem_cache_free(vm_area_cachep, vma);
5826+ goto emulate;
5827+ }
5828+
5829+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831+ up_write(&current->mm->mmap_sem);
5832+ if (vma)
5833+ kmem_cache_free(vm_area_cachep, vma);
5834+ return 1;
5835+ }
5836+
5837+ if (pax_insert_vma(vma, call_dl_resolve)) {
5838+ up_write(&current->mm->mmap_sem);
5839+ kmem_cache_free(vm_area_cachep, vma);
5840+ return 1;
5841+ }
5842+
5843+ current->mm->call_dl_resolve = call_dl_resolve;
5844+ up_write(&current->mm->mmap_sem);
5845+
5846+emulate:
5847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848+ regs->tpc = call_dl_resolve;
5849+ regs->tnpc = addr+4;
5850+ return 3;
5851+ }
5852+#endif
5853+
5854+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855+ if ((save & 0xFFC00000U) == 0x05000000U &&
5856+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5857+ nop == 0x01000000U)
5858+ {
5859+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860+ regs->u_regs[UREG_G2] = addr + 4;
5861+ addr = (save & 0x003FFFFFU) << 10;
5862+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863+
5864+ if (test_thread_flag(TIF_32BIT))
5865+ addr &= 0xFFFFFFFFUL;
5866+
5867+ regs->tpc = addr;
5868+ regs->tnpc = addr+4;
5869+ return 3;
5870+ }
5871+
5872+ /* PaX: 64-bit PLT stub */
5873+ err = get_user(sethi1, (unsigned int *)addr);
5874+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5875+ err |= get_user(or1, (unsigned int *)(addr+8));
5876+ err |= get_user(or2, (unsigned int *)(addr+12));
5877+ err |= get_user(sllx, (unsigned int *)(addr+16));
5878+ err |= get_user(add, (unsigned int *)(addr+20));
5879+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5880+ err |= get_user(nop, (unsigned int *)(addr+28));
5881+ if (err)
5882+ break;
5883+
5884+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5887+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888+ sllx == 0x89293020U &&
5889+ add == 0x8A010005U &&
5890+ jmpl == 0x89C14000U &&
5891+ nop == 0x01000000U)
5892+ {
5893+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895+ regs->u_regs[UREG_G4] <<= 32;
5896+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898+ regs->u_regs[UREG_G4] = addr + 24;
5899+ addr = regs->u_regs[UREG_G5];
5900+ regs->tpc = addr;
5901+ regs->tnpc = addr+4;
5902+ return 3;
5903+ }
5904+ }
5905+ } while (0);
5906+
5907+#ifdef CONFIG_PAX_DLRESOLVE
5908+ do { /* PaX: unpatched PLT emulation step 2 */
5909+ unsigned int save, call, nop;
5910+
5911+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5912+ err |= get_user(call, (unsigned int *)regs->tpc);
5913+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914+ if (err)
5915+ break;
5916+
5917+ if (save == 0x9DE3BFA8U &&
5918+ (call & 0xC0000000U) == 0x40000000U &&
5919+ nop == 0x01000000U)
5920+ {
5921+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922+
5923+ if (test_thread_flag(TIF_32BIT))
5924+ dl_resolve &= 0xFFFFFFFFUL;
5925+
5926+ regs->u_regs[UREG_RETPC] = regs->tpc;
5927+ regs->tpc = dl_resolve;
5928+ regs->tnpc = dl_resolve+4;
5929+ return 3;
5930+ }
5931+ } while (0);
5932+#endif
5933+
5934+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935+ unsigned int sethi, ba, nop;
5936+
5937+ err = get_user(sethi, (unsigned int *)regs->tpc);
5938+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940+
5941+ if (err)
5942+ break;
5943+
5944+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945+ (ba & 0xFFF00000U) == 0x30600000U &&
5946+ nop == 0x01000000U)
5947+ {
5948+ unsigned long addr;
5949+
5950+ addr = (sethi & 0x003FFFFFU) << 10;
5951+ regs->u_regs[UREG_G1] = addr;
5952+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953+
5954+ if (test_thread_flag(TIF_32BIT))
5955+ addr &= 0xFFFFFFFFUL;
5956+
5957+ regs->tpc = addr;
5958+ regs->tnpc = addr+4;
5959+ return 2;
5960+ }
5961+ } while (0);
5962+
5963+#endif
5964+
5965+ return 1;
5966+}
5967+
5968+void pax_report_insns(void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991+#ifdef CONFIG_PAX_PAGEEXEC
5992+ /* PaX: detect ITLB misses on non-exec pages */
5993+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995+ {
5996+ if (address != regs->tpc)
5997+ goto good_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ switch (pax_handle_fetch_fault(regs)) {
6001+
6002+#ifdef CONFIG_PAX_EMUPLT
6003+ case 2:
6004+ case 3:
6005+ return;
6006+#endif
6007+
6008+ }
6009+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010+ do_group_exit(SIGKILL);
6011+ }
6012+#endif
6013+
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017diff -urNp linux-2.6.32.48/arch/sparc/mm/hugetlbpage.c linux-2.6.32.48/arch/sparc/mm/hugetlbpage.c
6018--- linux-2.6.32.48/arch/sparc/mm/hugetlbpage.c 2011-11-08 19:02:43.000000000 -0500
6019+++ linux-2.6.32.48/arch/sparc/mm/hugetlbpage.c 2011-11-15 19:59:42.000000000 -0500
6020@@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024- if (likely(!vma || addr + len <= vma->vm_start)) {
6025+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033- if (!vma || addr <= vma->vm_start) {
6034+ if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042- addr = (mm->mmap_base-len) & HPAGE_MASK;
6043+ addr = mm->mmap_base - len;
6044
6045 do {
6046+ addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053- if (likely(!vma || addr+len <= vma->vm_start)) {
6054+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062- addr = (vma->vm_start-len) & HPAGE_MASK;
6063- } while (likely(len < vma->vm_start));
6064+ addr = skip_heap_stack_gap(vma, len);
6065+ } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073- if (task_size - len >= addr &&
6074- (!vma || addr + len <= vma->vm_start))
6075+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079diff -urNp linux-2.6.32.48/arch/sparc/mm/init_32.c linux-2.6.32.48/arch/sparc/mm/init_32.c
6080--- linux-2.6.32.48/arch/sparc/mm/init_32.c 2011-11-08 19:02:43.000000000 -0500
6081+++ linux-2.6.32.48/arch/sparc/mm/init_32.c 2011-11-15 19:59:42.000000000 -0500
6082@@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088+
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092@@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096- protection_map[1] = PAGE_READONLY;
6097- protection_map[2] = PAGE_COPY;
6098- protection_map[3] = PAGE_COPY;
6099+ protection_map[1] = PAGE_READONLY_NOEXEC;
6100+ protection_map[2] = PAGE_COPY_NOEXEC;
6101+ protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107- protection_map[9] = PAGE_READONLY;
6108- protection_map[10] = PAGE_SHARED;
6109- protection_map[11] = PAGE_SHARED;
6110+ protection_map[9] = PAGE_READONLY_NOEXEC;
6111+ protection_map[10] = PAGE_SHARED_NOEXEC;
6112+ protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116diff -urNp linux-2.6.32.48/arch/sparc/mm/Makefile linux-2.6.32.48/arch/sparc/mm/Makefile
6117--- linux-2.6.32.48/arch/sparc/mm/Makefile 2011-11-08 19:02:43.000000000 -0500
6118+++ linux-2.6.32.48/arch/sparc/mm/Makefile 2011-11-15 19:59:42.000000000 -0500
6119@@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123-ccflags-y := -Werror
6124+#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128diff -urNp linux-2.6.32.48/arch/sparc/mm/srmmu.c linux-2.6.32.48/arch/sparc/mm/srmmu.c
6129--- linux-2.6.32.48/arch/sparc/mm/srmmu.c 2011-11-08 19:02:43.000000000 -0500
6130+++ linux-2.6.32.48/arch/sparc/mm/srmmu.c 2011-11-15 19:59:42.000000000 -0500
6131@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135+
6136+#ifdef CONFIG_PAX_PAGEEXEC
6137+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140+#endif
6141+
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145diff -urNp linux-2.6.32.48/arch/um/include/asm/kmap_types.h linux-2.6.32.48/arch/um/include/asm/kmap_types.h
6146--- linux-2.6.32.48/arch/um/include/asm/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
6147+++ linux-2.6.32.48/arch/um/include/asm/kmap_types.h 2011-11-15 19:59:42.000000000 -0500
6148@@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152+ KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156diff -urNp linux-2.6.32.48/arch/um/include/asm/page.h linux-2.6.32.48/arch/um/include/asm/page.h
6157--- linux-2.6.32.48/arch/um/include/asm/page.h 2011-11-08 19:02:43.000000000 -0500
6158+++ linux-2.6.32.48/arch/um/include/asm/page.h 2011-11-15 19:59:42.000000000 -0500
6159@@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163+#define ktla_ktva(addr) (addr)
6164+#define ktva_ktla(addr) (addr)
6165+
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169diff -urNp linux-2.6.32.48/arch/um/kernel/process.c linux-2.6.32.48/arch/um/kernel/process.c
6170--- linux-2.6.32.48/arch/um/kernel/process.c 2011-11-08 19:02:43.000000000 -0500
6171+++ linux-2.6.32.48/arch/um/kernel/process.c 2011-11-15 19:59:42.000000000 -0500
6172@@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176-/*
6177- * Only x86 and x86_64 have an arch_align_stack().
6178- * All other arches have "#define arch_align_stack(x) (x)"
6179- * in their asm/system.h
6180- * As this is included in UML from asm-um/system-generic.h,
6181- * we can use it to behave as the subarch does.
6182- */
6183-#ifndef arch_align_stack
6184-unsigned long arch_align_stack(unsigned long sp)
6185-{
6186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187- sp -= get_random_int() % 8192;
6188- return sp & ~0xf;
6189-}
6190-#endif
6191-
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195diff -urNp linux-2.6.32.48/arch/um/sys-i386/syscalls.c linux-2.6.32.48/arch/um/sys-i386/syscalls.c
6196--- linux-2.6.32.48/arch/um/sys-i386/syscalls.c 2011-11-08 19:02:43.000000000 -0500
6197+++ linux-2.6.32.48/arch/um/sys-i386/syscalls.c 2011-11-15 19:59:42.000000000 -0500
6198@@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203+{
6204+ unsigned long pax_task_size = TASK_SIZE;
6205+
6206+#ifdef CONFIG_PAX_SEGMEXEC
6207+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208+ pax_task_size = SEGMEXEC_TASK_SIZE;
6209+#endif
6210+
6211+ if (len > pax_task_size || addr > pax_task_size - len)
6212+ return -EINVAL;
6213+
6214+ return 0;
6215+}
6216+
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220diff -urNp linux-2.6.32.48/arch/x86/boot/bitops.h linux-2.6.32.48/arch/x86/boot/bitops.h
6221--- linux-2.6.32.48/arch/x86/boot/bitops.h 2011-11-08 19:02:43.000000000 -0500
6222+++ linux-2.6.32.48/arch/x86/boot/bitops.h 2011-11-15 19:59:42.000000000 -0500
6223@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241diff -urNp linux-2.6.32.48/arch/x86/boot/boot.h linux-2.6.32.48/arch/x86/boot/boot.h
6242--- linux-2.6.32.48/arch/x86/boot/boot.h 2011-11-08 19:02:43.000000000 -0500
6243+++ linux-2.6.32.48/arch/x86/boot/boot.h 2011-11-15 19:59:42.000000000 -0500
6244@@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248- asm("movw %%ds,%0" : "=rm" (seg));
6249+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257- asm("repe; cmpsb; setnz %0"
6258+ asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/head_32.S linux-2.6.32.48/arch/x86/boot/compressed/head_32.S
6263--- linux-2.6.32.48/arch/x86/boot/compressed/head_32.S 2011-11-08 19:02:43.000000000 -0500
6264+++ linux-2.6.32.48/arch/x86/boot/compressed/head_32.S 2011-11-15 19:59:42.000000000 -0500
6265@@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269- movl $LOAD_PHYSICAL_ADDR, %ebx
6270+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274@@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278- subl $LOAD_PHYSICAL_ADDR, %ebx
6279+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283@@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287- testl %ecx, %ecx
6288- jz 2f
6289+ jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/head_64.S linux-2.6.32.48/arch/x86/boot/compressed/head_64.S
6294--- linux-2.6.32.48/arch/x86/boot/compressed/head_64.S 2011-11-08 19:02:43.000000000 -0500
6295+++ linux-2.6.32.48/arch/x86/boot/compressed/head_64.S 2011-11-15 19:59:42.000000000 -0500
6296@@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300- movl $LOAD_PHYSICAL_ADDR, %ebx
6301+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305@@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309-#include "../../kernel/verify_cpu_64.S"
6310+#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314@@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318- movq $LOAD_PHYSICAL_ADDR, %rbp
6319+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/Makefile linux-2.6.32.48/arch/x86/boot/compressed/Makefile
6324--- linux-2.6.32.48/arch/x86/boot/compressed/Makefile 2011-11-08 19:02:43.000000000 -0500
6325+++ linux-2.6.32.48/arch/x86/boot/compressed/Makefile 2011-11-15 19:59:42.000000000 -0500
6326@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330+ifdef CONSTIFY_PLUGIN
6331+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332+endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/misc.c linux-2.6.32.48/arch/x86/boot/compressed/misc.c
6337--- linux-2.6.32.48/arch/x86/boot/compressed/misc.c 2011-11-08 19:02:43.000000000 -0500
6338+++ linux-2.6.32.48/arch/x86/boot/compressed/misc.c 2011-11-15 19:59:42.000000000 -0500
6339@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.48/arch/x86/boot/compressed/mkpiggy.c
6358--- linux-2.6.32.48/arch/x86/boot/compressed/mkpiggy.c 2011-11-08 19:02:43.000000000 -0500
6359+++ linux-2.6.32.48/arch/x86/boot/compressed/mkpiggy.c 2011-11-15 19:59:42.000000000 -0500
6360@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365+ offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369diff -urNp linux-2.6.32.48/arch/x86/boot/compressed/relocs.c linux-2.6.32.48/arch/x86/boot/compressed/relocs.c
6370--- linux-2.6.32.48/arch/x86/boot/compressed/relocs.c 2011-11-08 19:02:43.000000000 -0500
6371+++ linux-2.6.32.48/arch/x86/boot/compressed/relocs.c 2011-11-15 19:59:42.000000000 -0500
6372@@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376+#include "../../../../include/linux/autoconf.h"
6377+
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380+static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388- int i;
6389+ unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397+static void read_phdrs(FILE *fp)
6398+{
6399+ unsigned int i;
6400+
6401+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402+ if (!phdr) {
6403+ die("Unable to allocate %d program headers\n",
6404+ ehdr.e_phnum);
6405+ }
6406+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407+ die("Seek to %d failed: %s\n",
6408+ ehdr.e_phoff, strerror(errno));
6409+ }
6410+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411+ die("Cannot read ELF program headers: %s\n",
6412+ strerror(errno));
6413+ }
6414+ for(i = 0; i < ehdr.e_phnum; i++) {
6415+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423+ }
6424+
6425+}
6426+
6427 static void read_shdrs(FILE *fp)
6428 {
6429- int i;
6430+ unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438- int i;
6439+ unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447- int i,j;
6448+ unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456- int i,j;
6457+ unsigned int i,j;
6458+ uint32_t base;
6459+
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467+ base = 0;
6468+ for (j = 0; j < ehdr.e_phnum; j++) {
6469+ if (phdr[j].p_type != PT_LOAD )
6470+ continue;
6471+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472+ continue;
6473+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474+ break;
6475+ }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478- rel->r_offset = elf32_to_cpu(rel->r_offset);
6479+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487- int i;
6488+ unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495- int j;
6496+ unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504- int i, printed = 0;
6505+ unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512- int j;
6513+ unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521- int i;
6522+ unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528- int j;
6529+ unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539+ continue;
6540+
6541+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544+ continue;
6545+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546+ continue;
6547+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548+ continue;
6549+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550+ continue;
6551+#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559- int i;
6560+ unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568+ read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572diff -urNp linux-2.6.32.48/arch/x86/boot/cpucheck.c linux-2.6.32.48/arch/x86/boot/cpucheck.c
6573--- linux-2.6.32.48/arch/x86/boot/cpucheck.c 2011-11-08 19:02:43.000000000 -0500
6574+++ linux-2.6.32.48/arch/x86/boot/cpucheck.c 2011-11-15 19:59:42.000000000 -0500
6575@@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579- asm("movl %%cr0,%0" : "=r" (cr0));
6580+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588- asm("pushfl ; "
6589+ asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593@@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597- asm("cpuid"
6598+ asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602@@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606- asm("cpuid"
6607+ asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611@@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615- asm("cpuid"
6616+ asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620@@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624- asm("cpuid"
6625+ asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659- asm("cpuid"
6660+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662+ asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670diff -urNp linux-2.6.32.48/arch/x86/boot/header.S linux-2.6.32.48/arch/x86/boot/header.S
6671--- linux-2.6.32.48/arch/x86/boot/header.S 2011-11-08 19:02:43.000000000 -0500
6672+++ linux-2.6.32.48/arch/x86/boot/header.S 2011-11-15 19:59:42.000000000 -0500
6673@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682diff -urNp linux-2.6.32.48/arch/x86/boot/Makefile linux-2.6.32.48/arch/x86/boot/Makefile
6683--- linux-2.6.32.48/arch/x86/boot/Makefile 2011-11-08 19:02:43.000000000 -0500
6684+++ linux-2.6.32.48/arch/x86/boot/Makefile 2011-11-15 19:59:42.000000000 -0500
6685@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689+ifdef CONSTIFY_PLUGIN
6690+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691+endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695diff -urNp linux-2.6.32.48/arch/x86/boot/memory.c linux-2.6.32.48/arch/x86/boot/memory.c
6696--- linux-2.6.32.48/arch/x86/boot/memory.c 2011-11-08 19:02:43.000000000 -0500
6697+++ linux-2.6.32.48/arch/x86/boot/memory.c 2011-11-15 19:59:42.000000000 -0500
6698@@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702- int count = 0;
6703+ unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707diff -urNp linux-2.6.32.48/arch/x86/boot/video.c linux-2.6.32.48/arch/x86/boot/video.c
6708--- linux-2.6.32.48/arch/x86/boot/video.c 2011-11-08 19:02:43.000000000 -0500
6709+++ linux-2.6.32.48/arch/x86/boot/video.c 2011-11-15 19:59:42.000000000 -0500
6710@@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714- int i, len = 0;
6715+ unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719diff -urNp linux-2.6.32.48/arch/x86/boot/video-vesa.c linux-2.6.32.48/arch/x86/boot/video-vesa.c
6720--- linux-2.6.32.48/arch/x86/boot/video-vesa.c 2011-11-08 19:02:43.000000000 -0500
6721+++ linux-2.6.32.48/arch/x86/boot/video-vesa.c 2011-11-15 19:59:42.000000000 -0500
6722@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726+ boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730diff -urNp linux-2.6.32.48/arch/x86/crypto/aes-x86_64-asm_64.S linux-2.6.32.48/arch/x86/crypto/aes-x86_64-asm_64.S
6731--- linux-2.6.32.48/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-08 19:02:43.000000000 -0500
6732+++ linux-2.6.32.48/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-15 19:59:42.000000000 -0500
6733@@ -8,6 +8,8 @@
6734 * including this sentence is retained in full.
6735 */
6736
6737+#include <asm/alternative-asm.h>
6738+
6739 .extern crypto_ft_tab
6740 .extern crypto_it_tab
6741 .extern crypto_fl_tab
6742@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6743 je B192; \
6744 leaq 32(r9),r9;
6745
6746+#define ret pax_force_retaddr; ret
6747+
6748 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6749 movq r1,r2; \
6750 movq r3,r4; \
6751diff -urNp linux-2.6.32.48/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-2.6.32.48/arch/x86/crypto/salsa20-x86_64-asm_64.S
6752--- linux-2.6.32.48/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-08 19:02:43.000000000 -0500
6753+++ linux-2.6.32.48/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-15 19:59:42.000000000 -0500
6754@@ -1,3 +1,5 @@
6755+#include <asm/alternative-asm.h>
6756+
6757 # enter ECRYPT_encrypt_bytes
6758 .text
6759 .p2align 5
6760@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6761 add %r11,%rsp
6762 mov %rdi,%rax
6763 mov %rsi,%rdx
6764+ pax_force_retaddr
6765 ret
6766 # bytesatleast65:
6767 ._bytesatleast65:
6768@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6769 add %r11,%rsp
6770 mov %rdi,%rax
6771 mov %rsi,%rdx
6772+ pax_force_retaddr
6773 ret
6774 # enter ECRYPT_ivsetup
6775 .text
6776@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6777 add %r11,%rsp
6778 mov %rdi,%rax
6779 mov %rsi,%rdx
6780+ pax_force_retaddr
6781 ret
6782diff -urNp linux-2.6.32.48/arch/x86/crypto/twofish-x86_64-asm_64.S linux-2.6.32.48/arch/x86/crypto/twofish-x86_64-asm_64.S
6783--- linux-2.6.32.48/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-08 19:02:43.000000000 -0500
6784+++ linux-2.6.32.48/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-15 19:59:42.000000000 -0500
6785@@ -21,6 +21,7 @@
6786 .text
6787
6788 #include <asm/asm-offsets.h>
6789+#include <asm/alternative-asm.h>
6790
6791 #define a_offset 0
6792 #define b_offset 4
6793@@ -269,6 +270,7 @@ twofish_enc_blk:
6794
6795 popq R1
6796 movq $1,%rax
6797+ pax_force_retaddr
6798 ret
6799
6800 twofish_dec_blk:
6801@@ -321,4 +323,5 @@ twofish_dec_blk:
6802
6803 popq R1
6804 movq $1,%rax
6805+ pax_force_retaddr
6806 ret
6807diff -urNp linux-2.6.32.48/arch/x86/ia32/ia32_aout.c linux-2.6.32.48/arch/x86/ia32/ia32_aout.c
6808--- linux-2.6.32.48/arch/x86/ia32/ia32_aout.c 2011-11-08 19:02:43.000000000 -0500
6809+++ linux-2.6.32.48/arch/x86/ia32/ia32_aout.c 2011-11-15 19:59:42.000000000 -0500
6810@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6811 unsigned long dump_start, dump_size;
6812 struct user32 dump;
6813
6814+ memset(&dump, 0, sizeof(dump));
6815+
6816 fs = get_fs();
6817 set_fs(KERNEL_DS);
6818 has_dumped = 1;
6819@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6820 dump_size = dump.u_ssize << PAGE_SHIFT;
6821 DUMP_WRITE(dump_start, dump_size);
6822 }
6823- /*
6824- * Finally dump the task struct. Not be used by gdb, but
6825- * could be useful
6826- */
6827- set_fs(KERNEL_DS);
6828- DUMP_WRITE(current, sizeof(*current));
6829 end_coredump:
6830 set_fs(fs);
6831 return has_dumped;
6832diff -urNp linux-2.6.32.48/arch/x86/ia32/ia32entry.S linux-2.6.32.48/arch/x86/ia32/ia32entry.S
6833--- linux-2.6.32.48/arch/x86/ia32/ia32entry.S 2011-11-08 19:02:43.000000000 -0500
6834+++ linux-2.6.32.48/arch/x86/ia32/ia32entry.S 2011-11-15 19:59:42.000000000 -0500
6835@@ -13,6 +13,7 @@
6836 #include <asm/thread_info.h>
6837 #include <asm/segment.h>
6838 #include <asm/irqflags.h>
6839+#include <asm/pgtable.h>
6840 #include <linux/linkage.h>
6841
6842 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6843@@ -93,6 +94,29 @@ ENTRY(native_irq_enable_sysexit)
6844 ENDPROC(native_irq_enable_sysexit)
6845 #endif
6846
6847+ .macro pax_enter_kernel_user
6848+#ifdef CONFIG_PAX_MEMORY_UDEREF
6849+ call pax_enter_kernel_user
6850+#endif
6851+ .endm
6852+
6853+ .macro pax_exit_kernel_user
6854+#ifdef CONFIG_PAX_MEMORY_UDEREF
6855+ call pax_exit_kernel_user
6856+#endif
6857+#ifdef CONFIG_PAX_RANDKSTACK
6858+ pushq %rax
6859+ call pax_randomize_kstack
6860+ popq %rax
6861+#endif
6862+ .endm
6863+
6864+.macro pax_erase_kstack
6865+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6866+ call pax_erase_kstack
6867+#endif
6868+.endm
6869+
6870 /*
6871 * 32bit SYSENTER instruction entry.
6872 *
6873@@ -119,7 +143,7 @@ ENTRY(ia32_sysenter_target)
6874 CFI_REGISTER rsp,rbp
6875 SWAPGS_UNSAFE_STACK
6876 movq PER_CPU_VAR(kernel_stack), %rsp
6877- addq $(KERNEL_STACK_OFFSET),%rsp
6878+ pax_enter_kernel_user
6879 /*
6880 * No need to follow this irqs on/off section: the syscall
6881 * disabled irqs, here we enable it straight after entry:
6882@@ -135,7 +159,8 @@ ENTRY(ia32_sysenter_target)
6883 pushfq
6884 CFI_ADJUST_CFA_OFFSET 8
6885 /*CFI_REL_OFFSET rflags,0*/
6886- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6887+ GET_THREAD_INFO(%r10)
6888+ movl TI_sysenter_return(%r10), %r10d
6889 CFI_REGISTER rip,r10
6890 pushq $__USER32_CS
6891 CFI_ADJUST_CFA_OFFSET 8
6892@@ -150,6 +175,12 @@ ENTRY(ia32_sysenter_target)
6893 SAVE_ARGS 0,0,1
6894 /* no need to do an access_ok check here because rbp has been
6895 32bit zero extended */
6896+
6897+#ifdef CONFIG_PAX_MEMORY_UDEREF
6898+ mov $PAX_USER_SHADOW_BASE,%r10
6899+ add %r10,%rbp
6900+#endif
6901+
6902 1: movl (%rbp),%ebp
6903 .section __ex_table,"a"
6904 .quad 1b,ia32_badarg
6905@@ -172,6 +203,8 @@ sysenter_dispatch:
6906 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6907 jnz sysexit_audit
6908 sysexit_from_sys_call:
6909+ pax_exit_kernel_user
6910+ pax_erase_kstack
6911 andl $~TS_COMPAT,TI_status(%r10)
6912 /* clear IF, that popfq doesn't enable interrupts early */
6913 andl $~0x200,EFLAGS-R11(%rsp)
6914@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6915 movl %eax,%esi /* 2nd arg: syscall number */
6916 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6917 call audit_syscall_entry
6918+
6919+ pax_erase_kstack
6920+
6921 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6922 cmpq $(IA32_NR_syscalls-1),%rax
6923 ja ia32_badsys
6924@@ -252,6 +288,9 @@ sysenter_tracesys:
6925 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6926 movq %rsp,%rdi /* &pt_regs -> arg1 */
6927 call syscall_trace_enter
6928+
6929+ pax_erase_kstack
6930+
6931 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6932 RESTORE_REST
6933 cmpq $(IA32_NR_syscalls-1),%rax
6934@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6935 ENTRY(ia32_cstar_target)
6936 CFI_STARTPROC32 simple
6937 CFI_SIGNAL_FRAME
6938- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6939+ CFI_DEF_CFA rsp,0
6940 CFI_REGISTER rip,rcx
6941 /*CFI_REGISTER rflags,r11*/
6942 SWAPGS_UNSAFE_STACK
6943 movl %esp,%r8d
6944 CFI_REGISTER rsp,r8
6945 movq PER_CPU_VAR(kernel_stack),%rsp
6946+
6947+#ifdef CONFIG_PAX_MEMORY_UDEREF
6948+ pax_enter_kernel_user
6949+#endif
6950+
6951 /*
6952 * No need to follow this irqs on/off section: the syscall
6953 * disabled irqs and here we enable it straight after entry:
6954 */
6955 ENABLE_INTERRUPTS(CLBR_NONE)
6956- SAVE_ARGS 8,1,1
6957+ SAVE_ARGS 8*6,1,1
6958 movl %eax,%eax /* zero extension */
6959 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6960 movq %rcx,RIP-ARGOFFSET(%rsp)
6961@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6962 /* no need to do an access_ok check here because r8 has been
6963 32bit zero extended */
6964 /* hardware stack frame is complete now */
6965+
6966+#ifdef CONFIG_PAX_MEMORY_UDEREF
6967+ mov $PAX_USER_SHADOW_BASE,%r10
6968+ add %r10,%r8
6969+#endif
6970+
6971 1: movl (%r8),%r9d
6972 .section __ex_table,"a"
6973 .quad 1b,ia32_badarg
6974@@ -333,6 +383,8 @@ cstar_dispatch:
6975 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6976 jnz sysretl_audit
6977 sysretl_from_sys_call:
6978+ pax_exit_kernel_user
6979+ pax_erase_kstack
6980 andl $~TS_COMPAT,TI_status(%r10)
6981 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6982 movl RIP-ARGOFFSET(%rsp),%ecx
6983@@ -370,6 +422,9 @@ cstar_tracesys:
6984 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6985 movq %rsp,%rdi /* &pt_regs -> arg1 */
6986 call syscall_trace_enter
6987+
6988+ pax_erase_kstack
6989+
6990 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6991 RESTORE_REST
6992 xchgl %ebp,%r9d
6993@@ -415,6 +470,7 @@ ENTRY(ia32_syscall)
6994 CFI_REL_OFFSET rip,RIP-RIP
6995 PARAVIRT_ADJUST_EXCEPTION_FRAME
6996 SWAPGS
6997+ pax_enter_kernel_user
6998 /*
6999 * No need to follow this irqs on/off section: the syscall
7000 * disabled irqs and here we enable it straight after entry:
7001@@ -448,6 +504,9 @@ ia32_tracesys:
7002 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7003 movq %rsp,%rdi /* &pt_regs -> arg1 */
7004 call syscall_trace_enter
7005+
7006+ pax_erase_kstack
7007+
7008 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7009 RESTORE_REST
7010 cmpq $(IA32_NR_syscalls-1),%rax
7011diff -urNp linux-2.6.32.48/arch/x86/ia32/ia32_signal.c linux-2.6.32.48/arch/x86/ia32/ia32_signal.c
7012--- linux-2.6.32.48/arch/x86/ia32/ia32_signal.c 2011-11-08 19:02:43.000000000 -0500
7013+++ linux-2.6.32.48/arch/x86/ia32/ia32_signal.c 2011-11-15 19:59:42.000000000 -0500
7014@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
7015 }
7016 seg = get_fs();
7017 set_fs(KERNEL_DS);
7018- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7019+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7020 set_fs(seg);
7021 if (ret >= 0 && uoss_ptr) {
7022 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7023@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
7024 */
7025 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7026 size_t frame_size,
7027- void **fpstate)
7028+ void __user **fpstate)
7029 {
7030 unsigned long sp;
7031
7032@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
7033
7034 if (used_math()) {
7035 sp = sp - sig_xstate_ia32_size;
7036- *fpstate = (struct _fpstate_ia32 *) sp;
7037+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7038 if (save_i387_xstate_ia32(*fpstate) < 0)
7039 return (void __user *) -1L;
7040 }
7041@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
7042 sp -= frame_size;
7043 /* Align the stack pointer according to the i386 ABI,
7044 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7045- sp = ((sp + 4) & -16ul) - 4;
7046+ sp = ((sp - 12) & -16ul) - 4;
7047 return (void __user *) sp;
7048 }
7049
7050@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
7051 * These are actually not used anymore, but left because some
7052 * gdb versions depend on them as a marker.
7053 */
7054- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7055+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7056 } put_user_catch(err);
7057
7058 if (err)
7059@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
7060 0xb8,
7061 __NR_ia32_rt_sigreturn,
7062 0x80cd,
7063- 0,
7064+ 0
7065 };
7066
7067 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7068@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
7069
7070 if (ka->sa.sa_flags & SA_RESTORER)
7071 restorer = ka->sa.sa_restorer;
7072+ else if (current->mm->context.vdso)
7073+ /* Return stub is in 32bit vsyscall page */
7074+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7075 else
7076- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7077- rt_sigreturn);
7078+ restorer = &frame->retcode;
7079 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7080
7081 /*
7082 * Not actually used anymore, but left because some gdb
7083 * versions need it.
7084 */
7085- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7086+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7087 } put_user_catch(err);
7088
7089 if (err)
7090diff -urNp linux-2.6.32.48/arch/x86/ia32/sys_ia32.c linux-2.6.32.48/arch/x86/ia32/sys_ia32.c
7091--- linux-2.6.32.48/arch/x86/ia32/sys_ia32.c 2011-11-08 19:02:43.000000000 -0500
7092+++ linux-2.6.32.48/arch/x86/ia32/sys_ia32.c 2011-11-15 19:59:42.000000000 -0500
7093@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
7094 */
7095 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7096 {
7097- typeof(ubuf->st_uid) uid = 0;
7098- typeof(ubuf->st_gid) gid = 0;
7099+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7100+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7101 SET_UID(uid, stat->uid);
7102 SET_GID(gid, stat->gid);
7103 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7104@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
7105 }
7106 set_fs(KERNEL_DS);
7107 ret = sys_rt_sigprocmask(how,
7108- set ? (sigset_t __user *)&s : NULL,
7109- oset ? (sigset_t __user *)&s : NULL,
7110+ set ? (sigset_t __force_user *)&s : NULL,
7111+ oset ? (sigset_t __force_user *)&s : NULL,
7112 sigsetsize);
7113 set_fs(old_fs);
7114 if (ret)
7115@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_inter
7116 mm_segment_t old_fs = get_fs();
7117
7118 set_fs(KERNEL_DS);
7119- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7120+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7121 set_fs(old_fs);
7122 if (put_compat_timespec(&t, interval))
7123 return -EFAULT;
7124@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(comp
7125 mm_segment_t old_fs = get_fs();
7126
7127 set_fs(KERNEL_DS);
7128- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7129+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7130 set_fs(old_fs);
7131 if (!ret) {
7132 switch (_NSIG_WORDS) {
7133@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
7134 if (copy_siginfo_from_user32(&info, uinfo))
7135 return -EFAULT;
7136 set_fs(KERNEL_DS);
7137- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7138+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7139 set_fs(old_fs);
7140 return ret;
7141 }
7142@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_f
7143 return -EFAULT;
7144
7145 set_fs(KERNEL_DS);
7146- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7147+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7148 count);
7149 set_fs(old_fs);
7150
7151diff -urNp linux-2.6.32.48/arch/x86/include/asm/alternative-asm.h linux-2.6.32.48/arch/x86/include/asm/alternative-asm.h
7152--- linux-2.6.32.48/arch/x86/include/asm/alternative-asm.h 2011-11-08 19:02:43.000000000 -0500
7153+++ linux-2.6.32.48/arch/x86/include/asm/alternative-asm.h 2011-11-15 19:59:42.000000000 -0500
7154@@ -19,4 +19,18 @@
7155 .endm
7156 #endif
7157
7158+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7159+ .macro pax_force_retaddr rip=0
7160+ btsq $63,\rip(%rsp)
7161+ .endm
7162+ .macro pax_force_fptr ptr
7163+ btsq $63,\ptr
7164+ .endm
7165+#else
7166+ .macro pax_force_retaddr rip=0
7167+ .endm
7168+ .macro pax_force_fptr ptr
7169+ .endm
7170+#endif
7171+
7172 #endif /* __ASSEMBLY__ */
7173diff -urNp linux-2.6.32.48/arch/x86/include/asm/alternative.h linux-2.6.32.48/arch/x86/include/asm/alternative.h
7174--- linux-2.6.32.48/arch/x86/include/asm/alternative.h 2011-11-08 19:02:43.000000000 -0500
7175+++ linux-2.6.32.48/arch/x86/include/asm/alternative.h 2011-11-15 19:59:42.000000000 -0500
7176@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
7177 " .byte 662b-661b\n" /* sourcelen */ \
7178 " .byte 664f-663f\n" /* replacementlen */ \
7179 ".previous\n" \
7180- ".section .altinstr_replacement, \"ax\"\n" \
7181+ ".section .altinstr_replacement, \"a\"\n" \
7182 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7183 ".previous"
7184
7185diff -urNp linux-2.6.32.48/arch/x86/include/asm/apic.h linux-2.6.32.48/arch/x86/include/asm/apic.h
7186--- linux-2.6.32.48/arch/x86/include/asm/apic.h 2011-11-08 19:02:43.000000000 -0500
7187+++ linux-2.6.32.48/arch/x86/include/asm/apic.h 2011-11-15 19:59:42.000000000 -0500
7188@@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7189
7190 #ifdef CONFIG_X86_LOCAL_APIC
7191
7192-extern unsigned int apic_verbosity;
7193+extern int apic_verbosity;
7194 extern int local_apic_timer_c2_ok;
7195
7196 extern int disable_apic;
7197diff -urNp linux-2.6.32.48/arch/x86/include/asm/apm.h linux-2.6.32.48/arch/x86/include/asm/apm.h
7198--- linux-2.6.32.48/arch/x86/include/asm/apm.h 2011-11-08 19:02:43.000000000 -0500
7199+++ linux-2.6.32.48/arch/x86/include/asm/apm.h 2011-11-15 19:59:42.000000000 -0500
7200@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7201 __asm__ __volatile__(APM_DO_ZERO_SEGS
7202 "pushl %%edi\n\t"
7203 "pushl %%ebp\n\t"
7204- "lcall *%%cs:apm_bios_entry\n\t"
7205+ "lcall *%%ss:apm_bios_entry\n\t"
7206 "setc %%al\n\t"
7207 "popl %%ebp\n\t"
7208 "popl %%edi\n\t"
7209@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7210 __asm__ __volatile__(APM_DO_ZERO_SEGS
7211 "pushl %%edi\n\t"
7212 "pushl %%ebp\n\t"
7213- "lcall *%%cs:apm_bios_entry\n\t"
7214+ "lcall *%%ss:apm_bios_entry\n\t"
7215 "setc %%bl\n\t"
7216 "popl %%ebp\n\t"
7217 "popl %%edi\n\t"
7218diff -urNp linux-2.6.32.48/arch/x86/include/asm/atomic_32.h linux-2.6.32.48/arch/x86/include/asm/atomic_32.h
7219--- linux-2.6.32.48/arch/x86/include/asm/atomic_32.h 2011-11-08 19:02:43.000000000 -0500
7220+++ linux-2.6.32.48/arch/x86/include/asm/atomic_32.h 2011-11-15 19:59:42.000000000 -0500
7221@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7222 }
7223
7224 /**
7225+ * atomic_read_unchecked - read atomic variable
7226+ * @v: pointer of type atomic_unchecked_t
7227+ *
7228+ * Atomically reads the value of @v.
7229+ */
7230+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7231+{
7232+ return v->counter;
7233+}
7234+
7235+/**
7236 * atomic_set - set atomic variable
7237 * @v: pointer of type atomic_t
7238 * @i: required value
7239@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7240 }
7241
7242 /**
7243+ * atomic_set_unchecked - set atomic variable
7244+ * @v: pointer of type atomic_unchecked_t
7245+ * @i: required value
7246+ *
7247+ * Atomically sets the value of @v to @i.
7248+ */
7249+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7250+{
7251+ v->counter = i;
7252+}
7253+
7254+/**
7255 * atomic_add - add integer to atomic variable
7256 * @i: integer value to add
7257 * @v: pointer of type atomic_t
7258@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7259 */
7260 static inline void atomic_add(int i, atomic_t *v)
7261 {
7262- asm volatile(LOCK_PREFIX "addl %1,%0"
7263+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7264+
7265+#ifdef CONFIG_PAX_REFCOUNT
7266+ "jno 0f\n"
7267+ LOCK_PREFIX "subl %1,%0\n"
7268+ "int $4\n0:\n"
7269+ _ASM_EXTABLE(0b, 0b)
7270+#endif
7271+
7272+ : "+m" (v->counter)
7273+ : "ir" (i));
7274+}
7275+
7276+/**
7277+ * atomic_add_unchecked - add integer to atomic variable
7278+ * @i: integer value to add
7279+ * @v: pointer of type atomic_unchecked_t
7280+ *
7281+ * Atomically adds @i to @v.
7282+ */
7283+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7284+{
7285+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7286 : "+m" (v->counter)
7287 : "ir" (i));
7288 }
7289@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7290 */
7291 static inline void atomic_sub(int i, atomic_t *v)
7292 {
7293- asm volatile(LOCK_PREFIX "subl %1,%0"
7294+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7295+
7296+#ifdef CONFIG_PAX_REFCOUNT
7297+ "jno 0f\n"
7298+ LOCK_PREFIX "addl %1,%0\n"
7299+ "int $4\n0:\n"
7300+ _ASM_EXTABLE(0b, 0b)
7301+#endif
7302+
7303+ : "+m" (v->counter)
7304+ : "ir" (i));
7305+}
7306+
7307+/**
7308+ * atomic_sub_unchecked - subtract integer from atomic variable
7309+ * @i: integer value to subtract
7310+ * @v: pointer of type atomic_unchecked_t
7311+ *
7312+ * Atomically subtracts @i from @v.
7313+ */
7314+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7315+{
7316+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7317 : "+m" (v->counter)
7318 : "ir" (i));
7319 }
7320@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7321 {
7322 unsigned char c;
7323
7324- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7325+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7326+
7327+#ifdef CONFIG_PAX_REFCOUNT
7328+ "jno 0f\n"
7329+ LOCK_PREFIX "addl %2,%0\n"
7330+ "int $4\n0:\n"
7331+ _ASM_EXTABLE(0b, 0b)
7332+#endif
7333+
7334+ "sete %1\n"
7335 : "+m" (v->counter), "=qm" (c)
7336 : "ir" (i) : "memory");
7337 return c;
7338@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7339 */
7340 static inline void atomic_inc(atomic_t *v)
7341 {
7342- asm volatile(LOCK_PREFIX "incl %0"
7343+ asm volatile(LOCK_PREFIX "incl %0\n"
7344+
7345+#ifdef CONFIG_PAX_REFCOUNT
7346+ "jno 0f\n"
7347+ LOCK_PREFIX "decl %0\n"
7348+ "int $4\n0:\n"
7349+ _ASM_EXTABLE(0b, 0b)
7350+#endif
7351+
7352+ : "+m" (v->counter));
7353+}
7354+
7355+/**
7356+ * atomic_inc_unchecked - increment atomic variable
7357+ * @v: pointer of type atomic_unchecked_t
7358+ *
7359+ * Atomically increments @v by 1.
7360+ */
7361+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7362+{
7363+ asm volatile(LOCK_PREFIX "incl %0\n"
7364 : "+m" (v->counter));
7365 }
7366
7367@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7368 */
7369 static inline void atomic_dec(atomic_t *v)
7370 {
7371- asm volatile(LOCK_PREFIX "decl %0"
7372+ asm volatile(LOCK_PREFIX "decl %0\n"
7373+
7374+#ifdef CONFIG_PAX_REFCOUNT
7375+ "jno 0f\n"
7376+ LOCK_PREFIX "incl %0\n"
7377+ "int $4\n0:\n"
7378+ _ASM_EXTABLE(0b, 0b)
7379+#endif
7380+
7381+ : "+m" (v->counter));
7382+}
7383+
7384+/**
7385+ * atomic_dec_unchecked - decrement atomic variable
7386+ * @v: pointer of type atomic_unchecked_t
7387+ *
7388+ * Atomically decrements @v by 1.
7389+ */
7390+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7391+{
7392+ asm volatile(LOCK_PREFIX "decl %0\n"
7393 : "+m" (v->counter));
7394 }
7395
7396@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7397 {
7398 unsigned char c;
7399
7400- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7401+ asm volatile(LOCK_PREFIX "decl %0\n"
7402+
7403+#ifdef CONFIG_PAX_REFCOUNT
7404+ "jno 0f\n"
7405+ LOCK_PREFIX "incl %0\n"
7406+ "int $4\n0:\n"
7407+ _ASM_EXTABLE(0b, 0b)
7408+#endif
7409+
7410+ "sete %1\n"
7411 : "+m" (v->counter), "=qm" (c)
7412 : : "memory");
7413 return c != 0;
7414@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7415 {
7416 unsigned char c;
7417
7418- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7419+ asm volatile(LOCK_PREFIX "incl %0\n"
7420+
7421+#ifdef CONFIG_PAX_REFCOUNT
7422+ "jno 0f\n"
7423+ LOCK_PREFIX "decl %0\n"
7424+ "into\n0:\n"
7425+ _ASM_EXTABLE(0b, 0b)
7426+#endif
7427+
7428+ "sete %1\n"
7429+ : "+m" (v->counter), "=qm" (c)
7430+ : : "memory");
7431+ return c != 0;
7432+}
7433+
7434+/**
7435+ * atomic_inc_and_test_unchecked - increment and test
7436+ * @v: pointer of type atomic_unchecked_t
7437+ *
7438+ * Atomically increments @v by 1
7439+ * and returns true if the result is zero, or false for all
7440+ * other cases.
7441+ */
7442+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7443+{
7444+ unsigned char c;
7445+
7446+ asm volatile(LOCK_PREFIX "incl %0\n"
7447+ "sete %1\n"
7448 : "+m" (v->counter), "=qm" (c)
7449 : : "memory");
7450 return c != 0;
7451@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7452 {
7453 unsigned char c;
7454
7455- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7456+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7457+
7458+#ifdef CONFIG_PAX_REFCOUNT
7459+ "jno 0f\n"
7460+ LOCK_PREFIX "subl %2,%0\n"
7461+ "int $4\n0:\n"
7462+ _ASM_EXTABLE(0b, 0b)
7463+#endif
7464+
7465+ "sets %1\n"
7466 : "+m" (v->counter), "=qm" (c)
7467 : "ir" (i) : "memory");
7468 return c;
7469@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7470 #endif
7471 /* Modern 486+ processor */
7472 __i = i;
7473+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7474+
7475+#ifdef CONFIG_PAX_REFCOUNT
7476+ "jno 0f\n"
7477+ "movl %0, %1\n"
7478+ "int $4\n0:\n"
7479+ _ASM_EXTABLE(0b, 0b)
7480+#endif
7481+
7482+ : "+r" (i), "+m" (v->counter)
7483+ : : "memory");
7484+ return i + __i;
7485+
7486+#ifdef CONFIG_M386
7487+no_xadd: /* Legacy 386 processor */
7488+ local_irq_save(flags);
7489+ __i = atomic_read(v);
7490+ atomic_set(v, i + __i);
7491+ local_irq_restore(flags);
7492+ return i + __i;
7493+#endif
7494+}
7495+
7496+/**
7497+ * atomic_add_return_unchecked - add integer and return
7498+ * @v: pointer of type atomic_unchecked_t
7499+ * @i: integer value to add
7500+ *
7501+ * Atomically adds @i to @v and returns @i + @v
7502+ */
7503+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7504+{
7505+ int __i;
7506+#ifdef CONFIG_M386
7507+ unsigned long flags;
7508+ if (unlikely(boot_cpu_data.x86 <= 3))
7509+ goto no_xadd;
7510+#endif
7511+ /* Modern 486+ processor */
7512+ __i = i;
7513 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7514 : "+r" (i), "+m" (v->counter)
7515 : : "memory");
7516@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7517 return cmpxchg(&v->counter, old, new);
7518 }
7519
7520+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7521+{
7522+ return cmpxchg(&v->counter, old, new);
7523+}
7524+
7525 static inline int atomic_xchg(atomic_t *v, int new)
7526 {
7527 return xchg(&v->counter, new);
7528 }
7529
7530+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7531+{
7532+ return xchg(&v->counter, new);
7533+}
7534+
7535 /**
7536 * atomic_add_unless - add unless the number is already a given value
7537 * @v: pointer of type atomic_t
7538@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7539 */
7540 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7541 {
7542- int c, old;
7543+ int c, old, new;
7544 c = atomic_read(v);
7545 for (;;) {
7546- if (unlikely(c == (u)))
7547+ if (unlikely(c == u))
7548 break;
7549- old = atomic_cmpxchg((v), c, c + (a));
7550+
7551+ asm volatile("addl %2,%0\n"
7552+
7553+#ifdef CONFIG_PAX_REFCOUNT
7554+ "jno 0f\n"
7555+ "subl %2,%0\n"
7556+ "int $4\n0:\n"
7557+ _ASM_EXTABLE(0b, 0b)
7558+#endif
7559+
7560+ : "=r" (new)
7561+ : "0" (c), "ir" (a));
7562+
7563+ old = atomic_cmpxchg(v, c, new);
7564 if (likely(old == c))
7565 break;
7566 c = old;
7567 }
7568- return c != (u);
7569+ return c != u;
7570 }
7571
7572 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7573
7574 #define atomic_inc_return(v) (atomic_add_return(1, v))
7575+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7576+{
7577+ return atomic_add_return_unchecked(1, v);
7578+}
7579 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7580
7581 /* These are x86-specific, used by some header files */
7582@@ -266,9 +495,18 @@ typedef struct {
7583 u64 __aligned(8) counter;
7584 } atomic64_t;
7585
7586+#ifdef CONFIG_PAX_REFCOUNT
7587+typedef struct {
7588+ u64 __aligned(8) counter;
7589+} atomic64_unchecked_t;
7590+#else
7591+typedef atomic64_t atomic64_unchecked_t;
7592+#endif
7593+
7594 #define ATOMIC64_INIT(val) { (val) }
7595
7596 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7597+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7598
7599 /**
7600 * atomic64_xchg - xchg atomic64 variable
7601@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7602 * the old value.
7603 */
7604 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7605+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7606
7607 /**
7608 * atomic64_set - set atomic64 variable
7609@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7610 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7611
7612 /**
7613+ * atomic64_unchecked_set - set atomic64 variable
7614+ * @ptr: pointer to type atomic64_unchecked_t
7615+ * @new_val: value to assign
7616+ *
7617+ * Atomically sets the value of @ptr to @new_val.
7618+ */
7619+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7620+
7621+/**
7622 * atomic64_read - read atomic64 variable
7623 * @ptr: pointer to type atomic64_t
7624 *
7625@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7626 return res;
7627 }
7628
7629-extern u64 atomic64_read(atomic64_t *ptr);
7630+/**
7631+ * atomic64_read_unchecked - read atomic64 variable
7632+ * @ptr: pointer to type atomic64_unchecked_t
7633+ *
7634+ * Atomically reads the value of @ptr and returns it.
7635+ */
7636+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7637+{
7638+ u64 res;
7639+
7640+ /*
7641+ * Note, we inline this atomic64_unchecked_t primitive because
7642+ * it only clobbers EAX/EDX and leaves the others
7643+ * untouched. We also (somewhat subtly) rely on the
7644+ * fact that cmpxchg8b returns the current 64-bit value
7645+ * of the memory location we are touching:
7646+ */
7647+ asm volatile(
7648+ "mov %%ebx, %%eax\n\t"
7649+ "mov %%ecx, %%edx\n\t"
7650+ LOCK_PREFIX "cmpxchg8b %1\n"
7651+ : "=&A" (res)
7652+ : "m" (*ptr)
7653+ );
7654+
7655+ return res;
7656+}
7657
7658 /**
7659 * atomic64_add_return - add and return
7660@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7661 * Other variants with different arithmetic operators:
7662 */
7663 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7664+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7665 extern u64 atomic64_inc_return(atomic64_t *ptr);
7666+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7667 extern u64 atomic64_dec_return(atomic64_t *ptr);
7668+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7669
7670 /**
7671 * atomic64_add - add integer to atomic64 variable
7672@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7673 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7674
7675 /**
7676+ * atomic64_add_unchecked - add integer to atomic64 variable
7677+ * @delta: integer value to add
7678+ * @ptr: pointer to type atomic64_unchecked_t
7679+ *
7680+ * Atomically adds @delta to @ptr.
7681+ */
7682+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7683+
7684+/**
7685 * atomic64_sub - subtract the atomic64 variable
7686 * @delta: integer value to subtract
7687 * @ptr: pointer to type atomic64_t
7688@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7689 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7690
7691 /**
7692+ * atomic64_sub_unchecked - subtract the atomic64 variable
7693+ * @delta: integer value to subtract
7694+ * @ptr: pointer to type atomic64_unchecked_t
7695+ *
7696+ * Atomically subtracts @delta from @ptr.
7697+ */
7698+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7699+
7700+/**
7701 * atomic64_sub_and_test - subtract value from variable and test result
7702 * @delta: integer value to subtract
7703 * @ptr: pointer to type atomic64_t
7704@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7705 extern void atomic64_inc(atomic64_t *ptr);
7706
7707 /**
7708+ * atomic64_inc_unchecked - increment atomic64 variable
7709+ * @ptr: pointer to type atomic64_unchecked_t
7710+ *
7711+ * Atomically increments @ptr by 1.
7712+ */
7713+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7714+
7715+/**
7716 * atomic64_dec - decrement atomic64 variable
7717 * @ptr: pointer to type atomic64_t
7718 *
7719@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7720 extern void atomic64_dec(atomic64_t *ptr);
7721
7722 /**
7723+ * atomic64_dec_unchecked - decrement atomic64 variable
7724+ * @ptr: pointer to type atomic64_unchecked_t
7725+ *
7726+ * Atomically decrements @ptr by 1.
7727+ */
7728+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7729+
7730+/**
7731 * atomic64_dec_and_test - decrement and test
7732 * @ptr: pointer to type atomic64_t
7733 *
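
The atomic64_read_unchecked() hunk above relies on a cmpxchg8b trick to get an atomic 64-bit load on 32-bit x86: with EDX:EAX primed to equal ECX:EBX, the compare either matches (and stores back the same value) or fails (and loads the current value into EDX:EAX), so EDX:EAX always ends up holding the live 64-bit contents. Below is a minimal user-space sketch of the same idiom, assuming a 32-bit x86 build (gcc -m32) and GCC inline asm; read64_atomic() is an illustrative name, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the kernel asm above, hypothetical helper name. */
static inline uint64_t read64_atomic(volatile uint64_t *p)
{
	uint64_t res;

	asm volatile("movl %%ebx, %%eax\n\t"	/* EDX:EAX := ECX:EBX ...      */
		     "movl %%ecx, %%edx\n\t"	/* ... so a match is a no-op store */
		     "lock; cmpxchg8b %1"	/* either way EDX:EAX = current *p */
		     : "=&A" (res), "+m" (*p));
	return res;
}

int main(void)
{
	volatile uint64_t v = 0x1122334455667788ULL;

	printf("%llx\n", (unsigned long long)read64_atomic(&v));
	return 0;
}
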
7734diff -urNp linux-2.6.32.48/arch/x86/include/asm/atomic_64.h linux-2.6.32.48/arch/x86/include/asm/atomic_64.h
7735--- linux-2.6.32.48/arch/x86/include/asm/atomic_64.h 2011-11-08 19:02:43.000000000 -0500
7736+++ linux-2.6.32.48/arch/x86/include/asm/atomic_64.h 2011-11-15 19:59:42.000000000 -0500
7737@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7738 }
7739
7740 /**
7741+ * atomic_read_unchecked - read atomic variable
7742+ * @v: pointer of type atomic_unchecked_t
7743+ *
7744+ * Atomically reads the value of @v.
7745+ */
7746+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7747+{
7748+ return v->counter;
7749+}
7750+
7751+/**
7752 * atomic_set - set atomic variable
7753 * @v: pointer of type atomic_t
7754 * @i: required value
7755@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7756 }
7757
7758 /**
7759+ * atomic_set_unchecked - set atomic variable
7760+ * @v: pointer of type atomic_unchecked_t
7761+ * @i: required value
7762+ *
7763+ * Atomically sets the value of @v to @i.
7764+ */
7765+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7766+{
7767+ v->counter = i;
7768+}
7769+
7770+/**
7771 * atomic_add - add integer to atomic variable
7772 * @i: integer value to add
7773 * @v: pointer of type atomic_t
7774@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7775 */
7776 static inline void atomic_add(int i, atomic_t *v)
7777 {
7778- asm volatile(LOCK_PREFIX "addl %1,%0"
7779+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7780+
7781+#ifdef CONFIG_PAX_REFCOUNT
7782+ "jno 0f\n"
7783+ LOCK_PREFIX "subl %1,%0\n"
7784+ "int $4\n0:\n"
7785+ _ASM_EXTABLE(0b, 0b)
7786+#endif
7787+
7788+ : "=m" (v->counter)
7789+ : "ir" (i), "m" (v->counter));
7790+}
7791+
7792+/**
7793+ * atomic_add_unchecked - add integer to atomic variable
7794+ * @i: integer value to add
7795+ * @v: pointer of type atomic_unchecked_t
7796+ *
7797+ * Atomically adds @i to @v.
7798+ */
7799+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7800+{
7801+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7802 : "=m" (v->counter)
7803 : "ir" (i), "m" (v->counter));
7804 }
7805@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7806 */
7807 static inline void atomic_sub(int i, atomic_t *v)
7808 {
7809- asm volatile(LOCK_PREFIX "subl %1,%0"
7810+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7811+
7812+#ifdef CONFIG_PAX_REFCOUNT
7813+ "jno 0f\n"
7814+ LOCK_PREFIX "addl %1,%0\n"
7815+ "int $4\n0:\n"
7816+ _ASM_EXTABLE(0b, 0b)
7817+#endif
7818+
7819+ : "=m" (v->counter)
7820+ : "ir" (i), "m" (v->counter));
7821+}
7822+
7823+/**
7824+ * atomic_sub_unchecked - subtract the atomic variable
7825+ * @i: integer value to subtract
7826+ * @v: pointer of type atomic_unchecked_t
7827+ *
7828+ * Atomically subtracts @i from @v.
7829+ */
7830+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7831+{
7832+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7833 : "=m" (v->counter)
7834 : "ir" (i), "m" (v->counter));
7835 }
7836@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7837 {
7838 unsigned char c;
7839
7840- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7841+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7842+
7843+#ifdef CONFIG_PAX_REFCOUNT
7844+ "jno 0f\n"
7845+ LOCK_PREFIX "addl %2,%0\n"
7846+ "int $4\n0:\n"
7847+ _ASM_EXTABLE(0b, 0b)
7848+#endif
7849+
7850+ "sete %1\n"
7851 : "=m" (v->counter), "=qm" (c)
7852 : "ir" (i), "m" (v->counter) : "memory");
7853 return c;
7854@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7855 */
7856 static inline void atomic_inc(atomic_t *v)
7857 {
7858- asm volatile(LOCK_PREFIX "incl %0"
7859+ asm volatile(LOCK_PREFIX "incl %0\n"
7860+
7861+#ifdef CONFIG_PAX_REFCOUNT
7862+ "jno 0f\n"
7863+ LOCK_PREFIX "decl %0\n"
7864+ "int $4\n0:\n"
7865+ _ASM_EXTABLE(0b, 0b)
7866+#endif
7867+
7868+ : "=m" (v->counter)
7869+ : "m" (v->counter));
7870+}
7871+
7872+/**
7873+ * atomic_inc_unchecked - increment atomic variable
7874+ * @v: pointer of type atomic_unchecked_t
7875+ *
7876+ * Atomically increments @v by 1.
7877+ */
7878+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7879+{
7880+ asm volatile(LOCK_PREFIX "incl %0\n"
7881 : "=m" (v->counter)
7882 : "m" (v->counter));
7883 }
7884@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7885 */
7886 static inline void atomic_dec(atomic_t *v)
7887 {
7888- asm volatile(LOCK_PREFIX "decl %0"
7889+ asm volatile(LOCK_PREFIX "decl %0\n"
7890+
7891+#ifdef CONFIG_PAX_REFCOUNT
7892+ "jno 0f\n"
7893+ LOCK_PREFIX "incl %0\n"
7894+ "int $4\n0:\n"
7895+ _ASM_EXTABLE(0b, 0b)
7896+#endif
7897+
7898+ : "=m" (v->counter)
7899+ : "m" (v->counter));
7900+}
7901+
7902+/**
7903+ * atomic_dec_unchecked - decrement atomic variable
7904+ * @v: pointer of type atomic_unchecked_t
7905+ *
7906+ * Atomically decrements @v by 1.
7907+ */
7908+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7909+{
7910+ asm volatile(LOCK_PREFIX "decl %0\n"
7911 : "=m" (v->counter)
7912 : "m" (v->counter));
7913 }
7914@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7915 {
7916 unsigned char c;
7917
7918- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7919+ asm volatile(LOCK_PREFIX "decl %0\n"
7920+
7921+#ifdef CONFIG_PAX_REFCOUNT
7922+ "jno 0f\n"
7923+ LOCK_PREFIX "incl %0\n"
7924+ "int $4\n0:\n"
7925+ _ASM_EXTABLE(0b, 0b)
7926+#endif
7927+
7928+ "sete %1\n"
7929 : "=m" (v->counter), "=qm" (c)
7930 : "m" (v->counter) : "memory");
7931 return c != 0;
7932@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7933 {
7934 unsigned char c;
7935
7936- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7937+ asm volatile(LOCK_PREFIX "incl %0\n"
7938+
7939+#ifdef CONFIG_PAX_REFCOUNT
7940+ "jno 0f\n"
7941+ LOCK_PREFIX "decl %0\n"
7942+ "int $4\n0:\n"
7943+ _ASM_EXTABLE(0b, 0b)
7944+#endif
7945+
7946+ "sete %1\n"
7947+ : "=m" (v->counter), "=qm" (c)
7948+ : "m" (v->counter) : "memory");
7949+ return c != 0;
7950+}
7951+
7952+/**
7953+ * atomic_inc_and_test_unchecked - increment and test
7954+ * @v: pointer of type atomic_unchecked_t
7955+ *
7956+ * Atomically increments @v by 1
7957+ * and returns true if the result is zero, or false for all
7958+ * other cases.
7959+ */
7960+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7961+{
7962+ unsigned char c;
7963+
7964+ asm volatile(LOCK_PREFIX "incl %0\n"
7965+ "sete %1\n"
7966 : "=m" (v->counter), "=qm" (c)
7967 : "m" (v->counter) : "memory");
7968 return c != 0;
7969@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7970 {
7971 unsigned char c;
7972
7973- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7974+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7975+
7976+#ifdef CONFIG_PAX_REFCOUNT
7977+ "jno 0f\n"
7978+ LOCK_PREFIX "subl %2,%0\n"
7979+ "int $4\n0:\n"
7980+ _ASM_EXTABLE(0b, 0b)
7981+#endif
7982+
7983+ "sets %1\n"
7984 : "=m" (v->counter), "=qm" (c)
7985 : "ir" (i), "m" (v->counter) : "memory");
7986 return c;
7987@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7988 static inline int atomic_add_return(int i, atomic_t *v)
7989 {
7990 int __i = i;
7991- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7992+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7993+
7994+#ifdef CONFIG_PAX_REFCOUNT
7995+ "jno 0f\n"
7996+ "movl %0, %1\n"
7997+ "int $4\n0:\n"
7998+ _ASM_EXTABLE(0b, 0b)
7999+#endif
8000+
8001+ : "+r" (i), "+m" (v->counter)
8002+ : : "memory");
8003+ return i + __i;
8004+}
8005+
8006+/**
8007+ * atomic_add_return_unchecked - add and return
8008+ * @i: integer value to add
8009+ * @v: pointer of type atomic_unchecked_t
8010+ *
8011+ * Atomically adds @i to @v and returns @i + @v
8012+ */
8013+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8014+{
8015+ int __i = i;
8016+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8017 : "+r" (i), "+m" (v->counter)
8018 : : "memory");
8019 return i + __i;
8020@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
8021 }
8022
8023 #define atomic_inc_return(v) (atomic_add_return(1, v))
8024+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8025+{
8026+ return atomic_add_return_unchecked(1, v);
8027+}
8028 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8029
8030 /* The 64-bit atomic type */
8031@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
8032 }
8033
8034 /**
8035+ * atomic64_read_unchecked - read atomic64 variable
8036+ * @v: pointer of type atomic64_unchecked_t
8037+ *
8038+ * Atomically reads the value of @v.
8039+ * Doesn't imply a read memory barrier.
8040+ */
8041+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8042+{
8043+ return v->counter;
8044+}
8045+
8046+/**
8047 * atomic64_set - set atomic64 variable
8048 * @v: pointer to type atomic64_t
8049 * @i: required value
8050@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
8051 }
8052
8053 /**
8054+ * atomic64_set_unchecked - set atomic64 variable
8055+ * @v: pointer to type atomic64_unchecked_t
8056+ * @i: required value
8057+ *
8058+ * Atomically sets the value of @v to @i.
8059+ */
8060+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8061+{
8062+ v->counter = i;
8063+}
8064+
8065+/**
8066 * atomic64_add - add integer to atomic64 variable
8067 * @i: integer value to add
8068 * @v: pointer to type atomic64_t
8069@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
8070 */
8071 static inline void atomic64_add(long i, atomic64_t *v)
8072 {
8073+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
8074+
8075+#ifdef CONFIG_PAX_REFCOUNT
8076+ "jno 0f\n"
8077+ LOCK_PREFIX "subq %1,%0\n"
8078+ "int $4\n0:\n"
8079+ _ASM_EXTABLE(0b, 0b)
8080+#endif
8081+
8082+ : "=m" (v->counter)
8083+ : "er" (i), "m" (v->counter));
8084+}
8085+
8086+/**
8087+ * atomic64_add_unchecked - add integer to atomic64 variable
8088+ * @i: integer value to add
8089+ * @v: pointer to type atomic64_unchecked_t
8090+ *
8091+ * Atomically adds @i to @v.
8092+ */
8093+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8094+{
8095 asm volatile(LOCK_PREFIX "addq %1,%0"
8096 : "=m" (v->counter)
8097 : "er" (i), "m" (v->counter));
8098@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
8099 */
8100 static inline void atomic64_sub(long i, atomic64_t *v)
8101 {
8102- asm volatile(LOCK_PREFIX "subq %1,%0"
8103+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
8104+
8105+#ifdef CONFIG_PAX_REFCOUNT
8106+ "jno 0f\n"
8107+ LOCK_PREFIX "addq %1,%0\n"
8108+ "int $4\n0:\n"
8109+ _ASM_EXTABLE(0b, 0b)
8110+#endif
8111+
8112 : "=m" (v->counter)
8113 : "er" (i), "m" (v->counter));
8114 }
8115@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
8116 {
8117 unsigned char c;
8118
8119- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8120+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
8121+
8122+#ifdef CONFIG_PAX_REFCOUNT
8123+ "jno 0f\n"
8124+ LOCK_PREFIX "addq %2,%0\n"
8125+ "int $4\n0:\n"
8126+ _ASM_EXTABLE(0b, 0b)
8127+#endif
8128+
8129+ "sete %1\n"
8130 : "=m" (v->counter), "=qm" (c)
8131 : "er" (i), "m" (v->counter) : "memory");
8132 return c;
8133@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
8134 */
8135 static inline void atomic64_inc(atomic64_t *v)
8136 {
8137+ asm volatile(LOCK_PREFIX "incq %0\n"
8138+
8139+#ifdef CONFIG_PAX_REFCOUNT
8140+ "jno 0f\n"
8141+ LOCK_PREFIX "decq %0\n"
8142+ "int $4\n0:\n"
8143+ _ASM_EXTABLE(0b, 0b)
8144+#endif
8145+
8146+ : "=m" (v->counter)
8147+ : "m" (v->counter));
8148+}
8149+
8150+/**
8151+ * atomic64_inc_unchecked - increment atomic64 variable
8152+ * @v: pointer to type atomic64_unchecked_t
8153+ *
8154+ * Atomically increments @v by 1.
8155+ */
8156+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8157+{
8158 asm volatile(LOCK_PREFIX "incq %0"
8159 : "=m" (v->counter)
8160 : "m" (v->counter));
8161@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
8162 */
8163 static inline void atomic64_dec(atomic64_t *v)
8164 {
8165- asm volatile(LOCK_PREFIX "decq %0"
8166+ asm volatile(LOCK_PREFIX "decq %0\n"
8167+
8168+#ifdef CONFIG_PAX_REFCOUNT
8169+ "jno 0f\n"
8170+ LOCK_PREFIX "incq %0\n"
8171+ "int $4\n0:\n"
8172+ _ASM_EXTABLE(0b, 0b)
8173+#endif
8174+
8175+ : "=m" (v->counter)
8176+ : "m" (v->counter));
8177+}
8178+
8179+/**
8180+ * atomic64_dec_unchecked - decrement atomic64 variable
8181+ * @v: pointer to type atomic64_unchecked_t
8182+ *
8183+ * Atomically decrements @v by 1.
8184+ */
8185+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8186+{
8187+ asm volatile(LOCK_PREFIX "decq %0\n"
8188 : "=m" (v->counter)
8189 : "m" (v->counter));
8190 }
8191@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8192 {
8193 unsigned char c;
8194
8195- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8196+ asm volatile(LOCK_PREFIX "decq %0\n"
8197+
8198+#ifdef CONFIG_PAX_REFCOUNT
8199+ "jno 0f\n"
8200+ LOCK_PREFIX "incq %0\n"
8201+ "int $4\n0:\n"
8202+ _ASM_EXTABLE(0b, 0b)
8203+#endif
8204+
8205+ "sete %1\n"
8206 : "=m" (v->counter), "=qm" (c)
8207 : "m" (v->counter) : "memory");
8208 return c != 0;
8209@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8210 {
8211 unsigned char c;
8212
8213- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8214+ asm volatile(LOCK_PREFIX "incq %0\n"
8215+
8216+#ifdef CONFIG_PAX_REFCOUNT
8217+ "jno 0f\n"
8218+ LOCK_PREFIX "decq %0\n"
8219+ "int $4\n0:\n"
8220+ _ASM_EXTABLE(0b, 0b)
8221+#endif
8222+
8223+ "sete %1\n"
8224 : "=m" (v->counter), "=qm" (c)
8225 : "m" (v->counter) : "memory");
8226 return c != 0;
8227@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8228 {
8229 unsigned char c;
8230
8231- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8232+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8233+
8234+#ifdef CONFIG_PAX_REFCOUNT
8235+ "jno 0f\n"
8236+ LOCK_PREFIX "subq %2,%0\n"
8237+ "int $4\n0:\n"
8238+ _ASM_EXTABLE(0b, 0b)
8239+#endif
8240+
8241+ "sets %1\n"
8242 : "=m" (v->counter), "=qm" (c)
8243 : "er" (i), "m" (v->counter) : "memory");
8244 return c;
8245@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8246 static inline long atomic64_add_return(long i, atomic64_t *v)
8247 {
8248 long __i = i;
8249- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8250+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8251+
8252+#ifdef CONFIG_PAX_REFCOUNT
8253+ "jno 0f\n"
8254+ "movq %0, %1\n"
8255+ "int $4\n0:\n"
8256+ _ASM_EXTABLE(0b, 0b)
8257+#endif
8258+
8259+ : "+r" (i), "+m" (v->counter)
8260+ : : "memory");
8261+ return i + __i;
8262+}
8263+
8264+/**
8265+ * atomic64_add_return_unchecked - add and return
8266+ * @i: integer value to add
8267+ * @v: pointer to type atomic64_unchecked_t
8268+ *
8269+ * Atomically adds @i to @v and returns @i + @v
8270+ */
8271+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8272+{
8273+ long __i = i;
8274+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
8275 : "+r" (i), "+m" (v->counter)
8276 : : "memory");
8277 return i + __i;
8278@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8279 }
8280
8281 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8282+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8283+{
8284+ return atomic64_add_return_unchecked(1, v);
8285+}
8286 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8287
8288 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8289@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8290 return cmpxchg(&v->counter, old, new);
8291 }
8292
8293+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8294+{
8295+ return cmpxchg(&v->counter, old, new);
8296+}
8297+
8298 static inline long atomic64_xchg(atomic64_t *v, long new)
8299 {
8300 return xchg(&v->counter, new);
8301 }
8302
8303+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8304+{
8305+ return xchg(&v->counter, new);
8306+}
8307+
8308 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8309 {
8310 return cmpxchg(&v->counter, old, new);
8311 }
8312
8313+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8314+{
8315+ return cmpxchg(&v->counter, old, new);
8316+}
8317+
8318 static inline long atomic_xchg(atomic_t *v, int new)
8319 {
8320 return xchg(&v->counter, new);
8321 }
8322
8323+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8324+{
8325+ return xchg(&v->counter, new);
8326+}
8327+
8328 /**
8329 * atomic_add_unless - add unless the number is a given value
8330 * @v: pointer of type atomic_t
8331@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8332 */
8333 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8334 {
8335- int c, old;
8336+ int c, old, new;
8337 c = atomic_read(v);
8338 for (;;) {
8339- if (unlikely(c == (u)))
8340+ if (unlikely(c == u))
8341 break;
8342- old = atomic_cmpxchg((v), c, c + (a));
8343+
8344+ asm volatile("addl %2,%0\n"
8345+
8346+#ifdef CONFIG_PAX_REFCOUNT
8347+ "jno 0f\n"
8348+ "subl %2,%0\n"
8349+ "int $4\n0:\n"
8350+ _ASM_EXTABLE(0b, 0b)
8351+#endif
8352+
8353+ : "=r" (new)
8354+ : "0" (c), "ir" (a));
8355+
8356+ old = atomic_cmpxchg(v, c, new);
8357 if (likely(old == c))
8358 break;
8359 c = old;
8360 }
8361- return c != (u);
8362+ return c != u;
8363 }
8364
8365 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8366@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8367 */
8368 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8369 {
8370- long c, old;
8371+ long c, old, new;
8372 c = atomic64_read(v);
8373 for (;;) {
8374- if (unlikely(c == (u)))
8375+ if (unlikely(c == u))
8376 break;
8377- old = atomic64_cmpxchg((v), c, c + (a));
8378+
8379+ asm volatile("addq %2,%0\n"
8380+
8381+#ifdef CONFIG_PAX_REFCOUNT
8382+ "jno 0f\n"
8383+ "subq %2,%0\n"
8384+ "int $4\n0:\n"
8385+ _ASM_EXTABLE(0b, 0b)
8386+#endif
8387+
8388+ : "=r" (new)
8389+ : "0" (c), "er" (a));
8390+
8391+ old = atomic64_cmpxchg(v, c, new);
8392 if (likely(old == c))
8393 break;
8394 c = old;
8395 }
8396- return c != (u);
8397+ return c != u;
8398 }
8399
8400 /**
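
Every PAX_REFCOUNT hunk in this header follows the same shape: do the locked arithmetic, skip the fixup with jno when the Overflow Flag is clear, otherwise undo the operation and raise int $4 (the x86 overflow exception), which the kernel's trap handler reports as a refcount overflow. The following is a conceptual C model of that check-and-refuse semantics, assuming a compiler that provides __builtin_add_overflow (GCC 5+/Clang); it is an illustration, not the kernel mechanism, and abort() stands in for the #OF trap.

#include <stdlib.h>

struct counter { int value; };	/* stand-in for atomic_t in this sketch */

static void counter_add_checked(struct counter *c, int delta)
{
	int newval;

	/* The kernel adds first and subtracts back on overflow because it
	 * needs the flags from the locked instruction; this model simply
	 * never commits an overflowed value. */
	if (__builtin_add_overflow(c->value, delta, &newval))
		abort();	/* models "int $4" -> PaX refcount report */
	c->value = newval;
}
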
8401diff -urNp linux-2.6.32.48/arch/x86/include/asm/bitops.h linux-2.6.32.48/arch/x86/include/asm/bitops.h
8402--- linux-2.6.32.48/arch/x86/include/asm/bitops.h 2011-11-08 19:02:43.000000000 -0500
8403+++ linux-2.6.32.48/arch/x86/include/asm/bitops.h 2011-11-15 19:59:42.000000000 -0500
8404@@ -38,7 +38,7 @@
8405 * a mask operation on a byte.
8406 */
8407 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8408-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8409+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8410 #define CONST_MASK(nr) (1 << ((nr) & 7))
8411
8412 /**
8413diff -urNp linux-2.6.32.48/arch/x86/include/asm/boot.h linux-2.6.32.48/arch/x86/include/asm/boot.h
8414--- linux-2.6.32.48/arch/x86/include/asm/boot.h 2011-11-08 19:02:43.000000000 -0500
8415+++ linux-2.6.32.48/arch/x86/include/asm/boot.h 2011-11-15 19:59:42.000000000 -0500
8416@@ -11,10 +11,15 @@
8417 #include <asm/pgtable_types.h>
8418
8419 /* Physical address where kernel should be loaded. */
8420-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8421+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8422 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8423 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8424
8425+#ifndef __ASSEMBLY__
8426+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8427+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8428+#endif
8429+
8430 /* Minimum kernel alignment, as a power of two */
8431 #ifdef CONFIG_X86_64
8432 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
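
Turning LOAD_PHYSICAL_ADDR into the address of __LOAD_PHYSICAL_ADDR[] means the value is now supplied by the kernel's linker scripts instead of being a compile-time constant. A tiny sketch of that linker-symbol idiom, with an illustrative symbol name (__MY_BASE) rather than the real one:

/* linker script fragment (illustrative): __MY_BASE = 0x1000000; */
extern unsigned char __MY_BASE[];		/* no storage, only an address  */
#define MY_BASE ((unsigned long)__MY_BASE)	/* the symbol's value is its address */
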
8433diff -urNp linux-2.6.32.48/arch/x86/include/asm/cacheflush.h linux-2.6.32.48/arch/x86/include/asm/cacheflush.h
8434--- linux-2.6.32.48/arch/x86/include/asm/cacheflush.h 2011-11-08 19:02:43.000000000 -0500
8435+++ linux-2.6.32.48/arch/x86/include/asm/cacheflush.h 2011-11-15 19:59:42.000000000 -0500
8436@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8437 static inline unsigned long get_page_memtype(struct page *pg)
8438 {
8439 if (!PageUncached(pg) && !PageWC(pg))
8440- return -1;
8441+ return ~0UL;
8442 else if (!PageUncached(pg) && PageWC(pg))
8443 return _PAGE_CACHE_WC;
8444 else if (PageUncached(pg) && !PageWC(pg))
8445@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8446 SetPageWC(pg);
8447 break;
8448 default:
8449- case -1:
8450+ case ~0UL:
8451 ClearPageUncached(pg);
8452 ClearPageWC(pg);
8453 break;
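
get_page_memtype() returns unsigned long, so the old return -1 was already converted to all-ones; spelling it ~0UL (and matching it with case ~0UL) makes the sentinel explicit instead of relying on signed-to-unsigned conversion. A two-line check of that equivalence:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long a = -1;		/* implicit conversion to ULONG_MAX */
	unsigned long b = ~0UL;		/* explicit all-ones sentinel       */

	printf("%d %d\n", a == ULONG_MAX, a == b);	/* prints: 1 1 */
	return 0;
}
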
8454diff -urNp linux-2.6.32.48/arch/x86/include/asm/cache.h linux-2.6.32.48/arch/x86/include/asm/cache.h
8455--- linux-2.6.32.48/arch/x86/include/asm/cache.h 2011-11-08 19:02:43.000000000 -0500
8456+++ linux-2.6.32.48/arch/x86/include/asm/cache.h 2011-11-15 19:59:42.000000000 -0500
8457@@ -5,9 +5,10 @@
8458
8459 /* L1 cache line size */
8460 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8461-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8462+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8463
8464 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8465+#define __read_only __attribute__((__section__(".data.read_only")))
8466
8467 #ifdef CONFIG_X86_VSMP
8468 /* vSMP Internode cacheline shift */
8469diff -urNp linux-2.6.32.48/arch/x86/include/asm/calling.h linux-2.6.32.48/arch/x86/include/asm/calling.h
8470--- linux-2.6.32.48/arch/x86/include/asm/calling.h 2011-11-08 19:02:43.000000000 -0500
8471+++ linux-2.6.32.48/arch/x86/include/asm/calling.h 2011-11-15 19:59:42.000000000 -0500
8472@@ -52,32 +52,32 @@ For 32-bit we have the following convent
8473 * for assembly code:
8474 */
8475
8476-#define R15 0
8477-#define R14 8
8478-#define R13 16
8479-#define R12 24
8480-#define RBP 32
8481-#define RBX 40
8482+#define R15 (0)
8483+#define R14 (8)
8484+#define R13 (16)
8485+#define R12 (24)
8486+#define RBP (32)
8487+#define RBX (40)
8488
8489 /* arguments: interrupts/non tracing syscalls only save up to here: */
8490-#define R11 48
8491-#define R10 56
8492-#define R9 64
8493-#define R8 72
8494-#define RAX 80
8495-#define RCX 88
8496-#define RDX 96
8497-#define RSI 104
8498-#define RDI 112
8499-#define ORIG_RAX 120 /* + error_code */
8500+#define R11 (48)
8501+#define R10 (56)
8502+#define R9 (64)
8503+#define R8 (72)
8504+#define RAX (80)
8505+#define RCX (88)
8506+#define RDX (96)
8507+#define RSI (104)
8508+#define RDI (112)
8509+#define ORIG_RAX (120) /* + error_code */
8510 /* end of arguments */
8511
8512 /* cpu exception frame or undefined in case of fast syscall: */
8513-#define RIP 128
8514-#define CS 136
8515-#define EFLAGS 144
8516-#define RSP 152
8517-#define SS 160
8518+#define RIP (128)
8519+#define CS (136)
8520+#define EFLAGS (144)
8521+#define RSP (152)
8522+#define SS (160)
8523
8524 #define ARGOFFSET R11
8525 #define SWFRAME ORIG_RAX
8526diff -urNp linux-2.6.32.48/arch/x86/include/asm/checksum_32.h linux-2.6.32.48/arch/x86/include/asm/checksum_32.h
8527--- linux-2.6.32.48/arch/x86/include/asm/checksum_32.h 2011-11-08 19:02:43.000000000 -0500
8528+++ linux-2.6.32.48/arch/x86/include/asm/checksum_32.h 2011-11-15 19:59:42.000000000 -0500
8529@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8530 int len, __wsum sum,
8531 int *src_err_ptr, int *dst_err_ptr);
8532
8533+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8534+ int len, __wsum sum,
8535+ int *src_err_ptr, int *dst_err_ptr);
8536+
8537+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8538+ int len, __wsum sum,
8539+ int *src_err_ptr, int *dst_err_ptr);
8540+
8541 /*
8542 * Note: when you get a NULL pointer exception here this means someone
8543 * passed in an incorrect kernel address to one of these functions.
8544@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8545 int *err_ptr)
8546 {
8547 might_sleep();
8548- return csum_partial_copy_generic((__force void *)src, dst,
8549+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8550 len, sum, err_ptr, NULL);
8551 }
8552
8553@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8554 {
8555 might_sleep();
8556 if (access_ok(VERIFY_WRITE, dst, len))
8557- return csum_partial_copy_generic(src, (__force void *)dst,
8558+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8559 len, sum, NULL, err_ptr);
8560
8561 if (len)
8562diff -urNp linux-2.6.32.48/arch/x86/include/asm/desc_defs.h linux-2.6.32.48/arch/x86/include/asm/desc_defs.h
8563--- linux-2.6.32.48/arch/x86/include/asm/desc_defs.h 2011-11-08 19:02:43.000000000 -0500
8564+++ linux-2.6.32.48/arch/x86/include/asm/desc_defs.h 2011-11-15 19:59:42.000000000 -0500
8565@@ -31,6 +31,12 @@ struct desc_struct {
8566 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8567 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8568 };
8569+ struct {
8570+ u16 offset_low;
8571+ u16 seg;
8572+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8573+ unsigned offset_high: 16;
8574+ } gate;
8575 };
8576 } __attribute__((packed));
8577
8578diff -urNp linux-2.6.32.48/arch/x86/include/asm/desc.h linux-2.6.32.48/arch/x86/include/asm/desc.h
8579--- linux-2.6.32.48/arch/x86/include/asm/desc.h 2011-11-08 19:02:43.000000000 -0500
8580+++ linux-2.6.32.48/arch/x86/include/asm/desc.h 2011-11-15 19:59:42.000000000 -0500
8581@@ -4,6 +4,7 @@
8582 #include <asm/desc_defs.h>
8583 #include <asm/ldt.h>
8584 #include <asm/mmu.h>
8585+#include <asm/pgtable.h>
8586 #include <linux/smp.h>
8587
8588 static inline void fill_ldt(struct desc_struct *desc,
8589@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8590 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8591 desc->type = (info->read_exec_only ^ 1) << 1;
8592 desc->type |= info->contents << 2;
8593+ desc->type |= info->seg_not_present ^ 1;
8594 desc->s = 1;
8595 desc->dpl = 0x3;
8596 desc->p = info->seg_not_present ^ 1;
8597@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8598 }
8599
8600 extern struct desc_ptr idt_descr;
8601-extern gate_desc idt_table[];
8602-
8603-struct gdt_page {
8604- struct desc_struct gdt[GDT_ENTRIES];
8605-} __attribute__((aligned(PAGE_SIZE)));
8606-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8607+extern gate_desc idt_table[256];
8608
8609+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8610 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8611 {
8612- return per_cpu(gdt_page, cpu).gdt;
8613+ return cpu_gdt_table[cpu];
8614 }
8615
8616 #ifdef CONFIG_X86_64
8617@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8618 unsigned long base, unsigned dpl, unsigned flags,
8619 unsigned short seg)
8620 {
8621- gate->a = (seg << 16) | (base & 0xffff);
8622- gate->b = (base & 0xffff0000) |
8623- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8624+ gate->gate.offset_low = base;
8625+ gate->gate.seg = seg;
8626+ gate->gate.reserved = 0;
8627+ gate->gate.type = type;
8628+ gate->gate.s = 0;
8629+ gate->gate.dpl = dpl;
8630+ gate->gate.p = 1;
8631+ gate->gate.offset_high = base >> 16;
8632 }
8633
8634 #endif
8635@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8636 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8637 const gate_desc *gate)
8638 {
8639+ pax_open_kernel();
8640 memcpy(&idt[entry], gate, sizeof(*gate));
8641+ pax_close_kernel();
8642 }
8643
8644 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8645 const void *desc)
8646 {
8647+ pax_open_kernel();
8648 memcpy(&ldt[entry], desc, 8);
8649+ pax_close_kernel();
8650 }
8651
8652 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8653@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8654 size = sizeof(struct desc_struct);
8655 break;
8656 }
8657+
8658+ pax_open_kernel();
8659 memcpy(&gdt[entry], desc, size);
8660+ pax_close_kernel();
8661 }
8662
8663 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8664@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8665
8666 static inline void native_load_tr_desc(void)
8667 {
8668+ pax_open_kernel();
8669 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8670+ pax_close_kernel();
8671 }
8672
8673 static inline void native_load_gdt(const struct desc_ptr *dtr)
8674@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8675 unsigned int i;
8676 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8677
8678+ pax_open_kernel();
8679 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8680 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8681+ pax_close_kernel();
8682 }
8683
8684 #define _LDT_empty(info) \
8685@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8686 desc->limit = (limit >> 16) & 0xf;
8687 }
8688
8689-static inline void _set_gate(int gate, unsigned type, void *addr,
8690+static inline void _set_gate(int gate, unsigned type, const void *addr,
8691 unsigned dpl, unsigned ist, unsigned seg)
8692 {
8693 gate_desc s;
8694@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8695 * Pentium F0 0F bugfix can have resulted in the mapped
8696 * IDT being write-protected.
8697 */
8698-static inline void set_intr_gate(unsigned int n, void *addr)
8699+static inline void set_intr_gate(unsigned int n, const void *addr)
8700 {
8701 BUG_ON((unsigned)n > 0xFF);
8702 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8703@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8704 /*
8705 * This routine sets up an interrupt gate at directory privilege level 3.
8706 */
8707-static inline void set_system_intr_gate(unsigned int n, void *addr)
8708+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8709 {
8710 BUG_ON((unsigned)n > 0xFF);
8711 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8712 }
8713
8714-static inline void set_system_trap_gate(unsigned int n, void *addr)
8715+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8716 {
8717 BUG_ON((unsigned)n > 0xFF);
8718 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8719 }
8720
8721-static inline void set_trap_gate(unsigned int n, void *addr)
8722+static inline void set_trap_gate(unsigned int n, const void *addr)
8723 {
8724 BUG_ON((unsigned)n > 0xFF);
8725 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8726@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8727 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8728 {
8729 BUG_ON((unsigned)n > 0xFF);
8730- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8731+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8732 }
8733
8734-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8735+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8736 {
8737 BUG_ON((unsigned)n > 0xFF);
8738 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8739 }
8740
8741-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8742+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8743 {
8744 BUG_ON((unsigned)n > 0xFF);
8745 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8746 }
8747
8748+#ifdef CONFIG_X86_32
8749+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8750+{
8751+ struct desc_struct d;
8752+
8753+ if (likely(limit))
8754+ limit = (limit - 1UL) >> PAGE_SHIFT;
8755+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8756+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8757+}
8758+#endif
8759+
8760 #endif /* _ASM_X86_DESC_H */
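
The pack_gate() rewrite above swaps the open-coded a/b word packing for named fields in the new gate view added to desc_defs.h. Below is a standalone check, assuming the usual x86/GCC bitfield layout (LSB-first within each storage unit) and arbitrary sample values, that the two encodings produce the same two 32-bit words.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the 'gate' view added to desc_defs.h above. */
struct gate32 {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	unsigned long base = 0xc01234abUL;		/* sample handler address    */
	unsigned seg = 0x60, type = 0xe, dpl = 3;	/* sample selector/gate type */
	struct gate32 g = {
		.offset_low = (uint16_t)base, .seg = (uint16_t)seg,
		.reserved = 0, .type = type, .s = 0, .dpl = dpl, .p = 1,
		.offset_high = base >> 16,
	};
	uint32_t w[2], a, b;

	/* the open-coded packing removed by the hunk above */
	a = (uint32_t)((seg << 16) | (base & 0xffff));
	b = (uint32_t)((base & 0xffff0000) |
		       (((0x80 | type | (dpl << 5)) & 0xff) << 8));

	memcpy(w, &g, sizeof(w));
	printf("struct: %08x %08x\n", w[0], w[1]);
	printf("manual: %08x %08x\n", a, b);	/* should match */
	return 0;
}
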
8761diff -urNp linux-2.6.32.48/arch/x86/include/asm/device.h linux-2.6.32.48/arch/x86/include/asm/device.h
8762--- linux-2.6.32.48/arch/x86/include/asm/device.h 2011-11-08 19:02:43.000000000 -0500
8763+++ linux-2.6.32.48/arch/x86/include/asm/device.h 2011-11-15 19:59:42.000000000 -0500
8764@@ -6,7 +6,7 @@ struct dev_archdata {
8765 void *acpi_handle;
8766 #endif
8767 #ifdef CONFIG_X86_64
8768-struct dma_map_ops *dma_ops;
8769+ const struct dma_map_ops *dma_ops;
8770 #endif
8771 #ifdef CONFIG_DMAR
8772 void *iommu; /* hook for IOMMU specific extension */
8773diff -urNp linux-2.6.32.48/arch/x86/include/asm/dma-mapping.h linux-2.6.32.48/arch/x86/include/asm/dma-mapping.h
8774--- linux-2.6.32.48/arch/x86/include/asm/dma-mapping.h 2011-11-08 19:02:43.000000000 -0500
8775+++ linux-2.6.32.48/arch/x86/include/asm/dma-mapping.h 2011-11-15 19:59:42.000000000 -0500
8776@@ -25,9 +25,9 @@ extern int iommu_merge;
8777 extern struct device x86_dma_fallback_dev;
8778 extern int panic_on_overflow;
8779
8780-extern struct dma_map_ops *dma_ops;
8781+extern const struct dma_map_ops *dma_ops;
8782
8783-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8784+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8785 {
8786 #ifdef CONFIG_X86_32
8787 return dma_ops;
8788@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8789 /* Make sure we keep the same behaviour */
8790 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8791 {
8792- struct dma_map_ops *ops = get_dma_ops(dev);
8793+ const struct dma_map_ops *ops = get_dma_ops(dev);
8794 if (ops->mapping_error)
8795 return ops->mapping_error(dev, dma_addr);
8796
8797@@ -122,7 +122,7 @@ static inline void *
8798 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8799 gfp_t gfp)
8800 {
8801- struct dma_map_ops *ops = get_dma_ops(dev);
8802+ const struct dma_map_ops *ops = get_dma_ops(dev);
8803 void *memory;
8804
8805 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8806@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8807 static inline void dma_free_coherent(struct device *dev, size_t size,
8808 void *vaddr, dma_addr_t bus)
8809 {
8810- struct dma_map_ops *ops = get_dma_ops(dev);
8811+ const struct dma_map_ops *ops = get_dma_ops(dev);
8812
8813 WARN_ON(irqs_disabled()); /* for portability */
8814
8815diff -urNp linux-2.6.32.48/arch/x86/include/asm/e820.h linux-2.6.32.48/arch/x86/include/asm/e820.h
8816--- linux-2.6.32.48/arch/x86/include/asm/e820.h 2011-11-08 19:02:43.000000000 -0500
8817+++ linux-2.6.32.48/arch/x86/include/asm/e820.h 2011-11-15 19:59:42.000000000 -0500
8818@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8819 #define ISA_END_ADDRESS 0x100000
8820 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8821
8822-#define BIOS_BEGIN 0x000a0000
8823+#define BIOS_BEGIN 0x000c0000
8824 #define BIOS_END 0x00100000
8825
8826 #ifdef __KERNEL__
8827diff -urNp linux-2.6.32.48/arch/x86/include/asm/elf.h linux-2.6.32.48/arch/x86/include/asm/elf.h
8828--- linux-2.6.32.48/arch/x86/include/asm/elf.h 2011-11-08 19:02:43.000000000 -0500
8829+++ linux-2.6.32.48/arch/x86/include/asm/elf.h 2011-11-15 19:59:42.000000000 -0500
8830@@ -257,7 +257,25 @@ extern int force_personality32;
8831 the loader. We need to make sure that it is out of the way of the program
8832 that it will "exec", and that there is sufficient room for the brk. */
8833
8834+#ifdef CONFIG_PAX_SEGMEXEC
8835+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8836+#else
8837 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8838+#endif
8839+
8840+#ifdef CONFIG_PAX_ASLR
8841+#ifdef CONFIG_X86_32
8842+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8843+
8844+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8845+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8846+#else
8847+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8848+
8849+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8850+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8851+#endif
8852+#endif
8853
8854 /* This yields a mask that user programs can use to figure out what
8855 instruction set this CPU supports. This could be done in user space,
8856@@ -310,9 +328,7 @@ do { \
8857
8858 #define ARCH_DLINFO \
8859 do { \
8860- if (vdso_enabled) \
8861- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8862- (unsigned long)current->mm->context.vdso); \
8863+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8864 } while (0)
8865
8866 #define AT_SYSINFO 32
8867@@ -323,7 +339,7 @@ do { \
8868
8869 #endif /* !CONFIG_X86_32 */
8870
8871-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8872+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8873
8874 #define VDSO_ENTRY \
8875 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8876@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8877 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8878 #define compat_arch_setup_additional_pages syscall32_setup_pages
8879
8880-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8881-#define arch_randomize_brk arch_randomize_brk
8882-
8883 #endif /* _ASM_X86_ELF_H */
8884diff -urNp linux-2.6.32.48/arch/x86/include/asm/emergency-restart.h linux-2.6.32.48/arch/x86/include/asm/emergency-restart.h
8885--- linux-2.6.32.48/arch/x86/include/asm/emergency-restart.h 2011-11-08 19:02:43.000000000 -0500
8886+++ linux-2.6.32.48/arch/x86/include/asm/emergency-restart.h 2011-11-15 19:59:42.000000000 -0500
8887@@ -15,6 +15,6 @@ enum reboot_type {
8888
8889 extern enum reboot_type reboot_type;
8890
8891-extern void machine_emergency_restart(void);
8892+extern void machine_emergency_restart(void) __noreturn;
8893
8894 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8895diff -urNp linux-2.6.32.48/arch/x86/include/asm/futex.h linux-2.6.32.48/arch/x86/include/asm/futex.h
8896--- linux-2.6.32.48/arch/x86/include/asm/futex.h 2011-11-08 19:02:43.000000000 -0500
8897+++ linux-2.6.32.48/arch/x86/include/asm/futex.h 2011-11-15 19:59:42.000000000 -0500
8898@@ -12,16 +12,18 @@
8899 #include <asm/system.h>
8900
8901 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8902+ typecheck(u32 __user *, uaddr); \
8903 asm volatile("1:\t" insn "\n" \
8904 "2:\t.section .fixup,\"ax\"\n" \
8905 "3:\tmov\t%3, %1\n" \
8906 "\tjmp\t2b\n" \
8907 "\t.previous\n" \
8908 _ASM_EXTABLE(1b, 3b) \
8909- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8910+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8911 : "i" (-EFAULT), "0" (oparg), "1" (0))
8912
8913 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8914+ typecheck(u32 __user *, uaddr); \
8915 asm volatile("1:\tmovl %2, %0\n" \
8916 "\tmovl\t%0, %3\n" \
8917 "\t" insn "\n" \
8918@@ -34,10 +36,10 @@
8919 _ASM_EXTABLE(1b, 4b) \
8920 _ASM_EXTABLE(2b, 4b) \
8921 : "=&a" (oldval), "=&r" (ret), \
8922- "+m" (*uaddr), "=&r" (tem) \
8923+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8924 : "r" (oparg), "i" (-EFAULT), "1" (0))
8925
8926-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8927+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8928 {
8929 int op = (encoded_op >> 28) & 7;
8930 int cmp = (encoded_op >> 24) & 15;
8931@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8932
8933 switch (op) {
8934 case FUTEX_OP_SET:
8935- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8936+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8937 break;
8938 case FUTEX_OP_ADD:
8939- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8940+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8941 uaddr, oparg);
8942 break;
8943 case FUTEX_OP_OR:
8944@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8945 return ret;
8946 }
8947
8948-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8949+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8950 int newval)
8951 {
8952
8953@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8954 return -ENOSYS;
8955 #endif
8956
8957- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8958+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8959 return -EFAULT;
8960
8961- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8962+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8963 "2:\t.section .fixup, \"ax\"\n"
8964 "3:\tmov %2, %0\n"
8965 "\tjmp 2b\n"
8966 "\t.previous\n"
8967 _ASM_EXTABLE(1b, 3b)
8968- : "=a" (oldval), "+m" (*uaddr)
8969+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8970 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8971 : "memory"
8972 );
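
The typecheck(u32 __user *, uaddr) statements added to the futex macros make the compiler verify the pointer type before it ever reaches the asm. A minimal sketch of that idiom (the real macro lives in include/linux/typecheck.h): comparing the addresses of two dummies turns a type mismatch into a "comparison of distinct pointer types" warning while generating no code.

#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

static int add_one(unsigned int *p)
{
	typecheck(unsigned int *, p);	/* OK: types match */
	/* typecheck(unsigned long *, p); would warn about distinct pointer types */
	return *p + 1;
}
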
8973diff -urNp linux-2.6.32.48/arch/x86/include/asm/hw_irq.h linux-2.6.32.48/arch/x86/include/asm/hw_irq.h
8974--- linux-2.6.32.48/arch/x86/include/asm/hw_irq.h 2011-11-08 19:02:43.000000000 -0500
8975+++ linux-2.6.32.48/arch/x86/include/asm/hw_irq.h 2011-11-15 19:59:42.000000000 -0500
8976@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8977 extern void enable_IO_APIC(void);
8978
8979 /* Statistics */
8980-extern atomic_t irq_err_count;
8981-extern atomic_t irq_mis_count;
8982+extern atomic_unchecked_t irq_err_count;
8983+extern atomic_unchecked_t irq_mis_count;
8984
8985 /* EISA */
8986 extern void eisa_set_level_irq(unsigned int irq);
8987diff -urNp linux-2.6.32.48/arch/x86/include/asm/i387.h linux-2.6.32.48/arch/x86/include/asm/i387.h
8988--- linux-2.6.32.48/arch/x86/include/asm/i387.h 2011-11-08 19:02:43.000000000 -0500
8989+++ linux-2.6.32.48/arch/x86/include/asm/i387.h 2011-11-15 19:59:42.000000000 -0500
8990@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8991 {
8992 int err;
8993
8994+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8995+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8996+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8997+#endif
8998+
8999 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
9000 "2:\n"
9001 ".section .fixup,\"ax\"\n"
9002@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
9003 {
9004 int err;
9005
9006+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9007+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
9008+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
9009+#endif
9010+
9011 asm volatile("1: rex64/fxsave (%[fx])\n\t"
9012 "2:\n"
9013 ".section .fixup,\"ax\"\n"
9014@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
9015 }
9016
9017 /* We need a safe address that is cheap to find and that is already
9018- in L1 during context switch. The best choices are unfortunately
9019- different for UP and SMP */
9020-#ifdef CONFIG_SMP
9021-#define safe_address (__per_cpu_offset[0])
9022-#else
9023-#define safe_address (kstat_cpu(0).cpustat.user)
9024-#endif
9025+ in L1 during context switch. */
9026+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
9027
9028 /*
9029 * These must be called with preempt disabled
9030@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
9031 struct thread_info *me = current_thread_info();
9032 preempt_disable();
9033 if (me->status & TS_USEDFPU)
9034- __save_init_fpu(me->task);
9035+ __save_init_fpu(current);
9036 else
9037 clts();
9038 }
9039diff -urNp linux-2.6.32.48/arch/x86/include/asm/io_32.h linux-2.6.32.48/arch/x86/include/asm/io_32.h
9040--- linux-2.6.32.48/arch/x86/include/asm/io_32.h 2011-11-08 19:02:43.000000000 -0500
9041+++ linux-2.6.32.48/arch/x86/include/asm/io_32.h 2011-11-15 19:59:42.000000000 -0500
9042@@ -3,6 +3,7 @@
9043
9044 #include <linux/string.h>
9045 #include <linux/compiler.h>
9046+#include <asm/processor.h>
9047
9048 /*
9049 * This file contains the definitions for the x86 IO instructions
9050@@ -42,6 +43,17 @@
9051
9052 #ifdef __KERNEL__
9053
9054+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
9055+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
9056+{
9057+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
9058+}
9059+
9060+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
9061+{
9062+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
9063+}
9064+
9065 #include <asm-generic/iomap.h>
9066
9067 #include <linux/vmalloc.h>
9068diff -urNp linux-2.6.32.48/arch/x86/include/asm/io_64.h linux-2.6.32.48/arch/x86/include/asm/io_64.h
9069--- linux-2.6.32.48/arch/x86/include/asm/io_64.h 2011-11-08 19:02:43.000000000 -0500
9070+++ linux-2.6.32.48/arch/x86/include/asm/io_64.h 2011-11-15 19:59:42.000000000 -0500
9071@@ -140,6 +140,17 @@ __OUTS(l)
9072
9073 #include <linux/vmalloc.h>
9074
9075+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
9076+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
9077+{
9078+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
9079+}
9080+
9081+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
9082+{
9083+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
9084+}
9085+
9086 #include <asm-generic/iomap.h>
9087
9088 void __memcpy_fromio(void *, unsigned long, unsigned);
9089diff -urNp linux-2.6.32.48/arch/x86/include/asm/iommu.h linux-2.6.32.48/arch/x86/include/asm/iommu.h
9090--- linux-2.6.32.48/arch/x86/include/asm/iommu.h 2011-11-08 19:02:43.000000000 -0500
9091+++ linux-2.6.32.48/arch/x86/include/asm/iommu.h 2011-11-15 19:59:42.000000000 -0500
9092@@ -3,7 +3,7 @@
9093
9094 extern void pci_iommu_shutdown(void);
9095 extern void no_iommu_init(void);
9096-extern struct dma_map_ops nommu_dma_ops;
9097+extern const struct dma_map_ops nommu_dma_ops;
9098 extern int force_iommu, no_iommu;
9099 extern int iommu_detected;
9100 extern int iommu_pass_through;
9101diff -urNp linux-2.6.32.48/arch/x86/include/asm/irqflags.h linux-2.6.32.48/arch/x86/include/asm/irqflags.h
9102--- linux-2.6.32.48/arch/x86/include/asm/irqflags.h 2011-11-08 19:02:43.000000000 -0500
9103+++ linux-2.6.32.48/arch/x86/include/asm/irqflags.h 2011-11-15 19:59:42.000000000 -0500
9104@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
9105 sti; \
9106 sysexit
9107
9108+#define GET_CR0_INTO_RDI mov %cr0, %rdi
9109+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
9110+#define GET_CR3_INTO_RDI mov %cr3, %rdi
9111+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
9112+
9113 #else
9114 #define INTERRUPT_RETURN iret
9115 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
9116diff -urNp linux-2.6.32.48/arch/x86/include/asm/kprobes.h linux-2.6.32.48/arch/x86/include/asm/kprobes.h
9117--- linux-2.6.32.48/arch/x86/include/asm/kprobes.h 2011-11-08 19:02:43.000000000 -0500
9118+++ linux-2.6.32.48/arch/x86/include/asm/kprobes.h 2011-11-15 19:59:42.000000000 -0500
9119@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
9120 #define BREAKPOINT_INSTRUCTION 0xcc
9121 #define RELATIVEJUMP_INSTRUCTION 0xe9
9122 #define MAX_INSN_SIZE 16
9123-#define MAX_STACK_SIZE 64
9124-#define MIN_STACK_SIZE(ADDR) \
9125- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
9126- THREAD_SIZE - (unsigned long)(ADDR))) \
9127- ? (MAX_STACK_SIZE) \
9128- : (((unsigned long)current_thread_info()) + \
9129- THREAD_SIZE - (unsigned long)(ADDR)))
9130+#define MAX_STACK_SIZE 64UL
9131+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
9132
9133 #define flush_insn_slot(p) do { } while (0)
9134
9135diff -urNp linux-2.6.32.48/arch/x86/include/asm/kvm_host.h linux-2.6.32.48/arch/x86/include/asm/kvm_host.h
9136--- linux-2.6.32.48/arch/x86/include/asm/kvm_host.h 2011-11-08 19:02:43.000000000 -0500
9137+++ linux-2.6.32.48/arch/x86/include/asm/kvm_host.h 2011-11-15 19:59:42.000000000 -0500
9138@@ -534,9 +534,9 @@ struct kvm_x86_ops {
9139 bool (*gb_page_enable)(void);
9140
9141 const struct trace_print_flags *exit_reasons_str;
9142-};
9143+} __do_const;
9144
9145-extern struct kvm_x86_ops *kvm_x86_ops;
9146+extern const struct kvm_x86_ops *kvm_x86_ops;
9147
9148 int kvm_mmu_module_init(void);
9149 void kvm_mmu_module_exit(void);
9150diff -urNp linux-2.6.32.48/arch/x86/include/asm/local.h linux-2.6.32.48/arch/x86/include/asm/local.h
9151--- linux-2.6.32.48/arch/x86/include/asm/local.h 2011-11-08 19:02:43.000000000 -0500
9152+++ linux-2.6.32.48/arch/x86/include/asm/local.h 2011-11-15 19:59:42.000000000 -0500
9153@@ -18,26 +18,58 @@ typedef struct {
9154
9155 static inline void local_inc(local_t *l)
9156 {
9157- asm volatile(_ASM_INC "%0"
9158+ asm volatile(_ASM_INC "%0\n"
9159+
9160+#ifdef CONFIG_PAX_REFCOUNT
9161+ "jno 0f\n"
9162+ _ASM_DEC "%0\n"
9163+ "int $4\n0:\n"
9164+ _ASM_EXTABLE(0b, 0b)
9165+#endif
9166+
9167 : "+m" (l->a.counter));
9168 }
9169
9170 static inline void local_dec(local_t *l)
9171 {
9172- asm volatile(_ASM_DEC "%0"
9173+ asm volatile(_ASM_DEC "%0\n"
9174+
9175+#ifdef CONFIG_PAX_REFCOUNT
9176+ "jno 0f\n"
9177+ _ASM_INC "%0\n"
9178+ "int $4\n0:\n"
9179+ _ASM_EXTABLE(0b, 0b)
9180+#endif
9181+
9182 : "+m" (l->a.counter));
9183 }
9184
9185 static inline void local_add(long i, local_t *l)
9186 {
9187- asm volatile(_ASM_ADD "%1,%0"
9188+ asm volatile(_ASM_ADD "%1,%0\n"
9189+
9190+#ifdef CONFIG_PAX_REFCOUNT
9191+ "jno 0f\n"
9192+ _ASM_SUB "%1,%0\n"
9193+ "int $4\n0:\n"
9194+ _ASM_EXTABLE(0b, 0b)
9195+#endif
9196+
9197 : "+m" (l->a.counter)
9198 : "ir" (i));
9199 }
9200
9201 static inline void local_sub(long i, local_t *l)
9202 {
9203- asm volatile(_ASM_SUB "%1,%0"
9204+ asm volatile(_ASM_SUB "%1,%0\n"
9205+
9206+#ifdef CONFIG_PAX_REFCOUNT
9207+ "jno 0f\n"
9208+ _ASM_ADD "%1,%0\n"
9209+ "int $4\n0:\n"
9210+ _ASM_EXTABLE(0b, 0b)
9211+#endif
9212+
9213 : "+m" (l->a.counter)
9214 : "ir" (i));
9215 }
9216@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
9217 {
9218 unsigned char c;
9219
9220- asm volatile(_ASM_SUB "%2,%0; sete %1"
9221+ asm volatile(_ASM_SUB "%2,%0\n"
9222+
9223+#ifdef CONFIG_PAX_REFCOUNT
9224+ "jno 0f\n"
9225+ _ASM_ADD "%2,%0\n"
9226+ "int $4\n0:\n"
9227+ _ASM_EXTABLE(0b, 0b)
9228+#endif
9229+
9230+ "sete %1\n"
9231 : "+m" (l->a.counter), "=qm" (c)
9232 : "ir" (i) : "memory");
9233 return c;
9234@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
9235 {
9236 unsigned char c;
9237
9238- asm volatile(_ASM_DEC "%0; sete %1"
9239+ asm volatile(_ASM_DEC "%0\n"
9240+
9241+#ifdef CONFIG_PAX_REFCOUNT
9242+ "jno 0f\n"
9243+ _ASM_INC "%0\n"
9244+ "int $4\n0:\n"
9245+ _ASM_EXTABLE(0b, 0b)
9246+#endif
9247+
9248+ "sete %1\n"
9249 : "+m" (l->a.counter), "=qm" (c)
9250 : : "memory");
9251 return c != 0;
9252@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9253 {
9254 unsigned char c;
9255
9256- asm volatile(_ASM_INC "%0; sete %1"
9257+ asm volatile(_ASM_INC "%0\n"
9258+
9259+#ifdef CONFIG_PAX_REFCOUNT
9260+ "jno 0f\n"
9261+ _ASM_DEC "%0\n"
9262+ "int $4\n0:\n"
9263+ _ASM_EXTABLE(0b, 0b)
9264+#endif
9265+
9266+ "sete %1\n"
9267 : "+m" (l->a.counter), "=qm" (c)
9268 : : "memory");
9269 return c != 0;
9270@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9271 {
9272 unsigned char c;
9273
9274- asm volatile(_ASM_ADD "%2,%0; sets %1"
9275+ asm volatile(_ASM_ADD "%2,%0\n"
9276+
9277+#ifdef CONFIG_PAX_REFCOUNT
9278+ "jno 0f\n"
9279+ _ASM_SUB "%2,%0\n"
9280+ "int $4\n0:\n"
9281+ _ASM_EXTABLE(0b, 0b)
9282+#endif
9283+
9284+ "sets %1\n"
9285 : "+m" (l->a.counter), "=qm" (c)
9286 : "ir" (i) : "memory");
9287 return c;
9288@@ -133,7 +201,15 @@ static inline long local_add_return(long
9289 #endif
9290 /* Modern 486+ processor */
9291 __i = i;
9292- asm volatile(_ASM_XADD "%0, %1;"
9293+ asm volatile(_ASM_XADD "%0, %1\n"
9294+
9295+#ifdef CONFIG_PAX_REFCOUNT
9296+ "jno 0f\n"
9297+ _ASM_MOV "%0,%1\n"
9298+ "int $4\n0:\n"
9299+ _ASM_EXTABLE(0b, 0b)
9300+#endif
9301+
9302 : "+r" (i), "+m" (l->a.counter)
9303 : : "memory");
9304 return i + __i;
9305diff -urNp linux-2.6.32.48/arch/x86/include/asm/microcode.h linux-2.6.32.48/arch/x86/include/asm/microcode.h
9306--- linux-2.6.32.48/arch/x86/include/asm/microcode.h 2011-11-08 19:02:43.000000000 -0500
9307+++ linux-2.6.32.48/arch/x86/include/asm/microcode.h 2011-11-15 19:59:42.000000000 -0500
9308@@ -12,13 +12,13 @@ struct device;
9309 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9310
9311 struct microcode_ops {
9312- enum ucode_state (*request_microcode_user) (int cpu,
9313+ enum ucode_state (* const request_microcode_user) (int cpu,
9314 const void __user *buf, size_t size);
9315
9316- enum ucode_state (*request_microcode_fw) (int cpu,
9317+ enum ucode_state (* const request_microcode_fw) (int cpu,
9318 struct device *device);
9319
9320- void (*microcode_fini_cpu) (int cpu);
9321+ void (* const microcode_fini_cpu) (int cpu);
9322
9323 /*
9324 * The generic 'microcode_core' part guarantees that
9325@@ -38,18 +38,18 @@ struct ucode_cpu_info {
9326 extern struct ucode_cpu_info ucode_cpu_info[];
9327
9328 #ifdef CONFIG_MICROCODE_INTEL
9329-extern struct microcode_ops * __init init_intel_microcode(void);
9330+extern const struct microcode_ops * __init init_intel_microcode(void);
9331 #else
9332-static inline struct microcode_ops * __init init_intel_microcode(void)
9333+static inline const struct microcode_ops * __init init_intel_microcode(void)
9334 {
9335 return NULL;
9336 }
9337 #endif /* CONFIG_MICROCODE_INTEL */
9338
9339 #ifdef CONFIG_MICROCODE_AMD
9340-extern struct microcode_ops * __init init_amd_microcode(void);
9341+extern const struct microcode_ops * __init init_amd_microcode(void);
9342 #else
9343-static inline struct microcode_ops * __init init_amd_microcode(void)
9344+static inline const struct microcode_ops * __init init_amd_microcode(void)
9345 {
9346 return NULL;
9347 }
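
The const sprinkled over microcode_ops here, and over dma_map_ops and kvm_x86_ops earlier in this patch, lets these function-pointer tables live in read-only memory so they cannot be retargeted at runtime. A small sketch of the pattern with illustrative names (struct ops, apply_impl):

#include <stdio.h>

struct ops {
	int (*apply)(int);	/* function-pointer member, never reassigned */
};

static int apply_impl(int x) { return x * 2; }

static const struct ops my_ops = {	/* const instance -> .rodata */
	.apply = apply_impl,
};

static int run(const struct ops *ops, int x)
{
	return ops->apply(x);	/* callers only ever see a const pointer */
}

int main(void)
{
	printf("%d\n", run(&my_ops, 21));	/* prints 42 */
	return 0;
}
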
9348diff -urNp linux-2.6.32.48/arch/x86/include/asm/mman.h linux-2.6.32.48/arch/x86/include/asm/mman.h
9349--- linux-2.6.32.48/arch/x86/include/asm/mman.h 2011-11-08 19:02:43.000000000 -0500
9350+++ linux-2.6.32.48/arch/x86/include/asm/mman.h 2011-11-15 19:59:42.000000000 -0500
9351@@ -5,4 +5,14 @@
9352
9353 #include <asm-generic/mman.h>
9354
9355+#ifdef __KERNEL__
9356+#ifndef __ASSEMBLY__
9357+#ifdef CONFIG_X86_32
9358+#define arch_mmap_check i386_mmap_check
9359+int i386_mmap_check(unsigned long addr, unsigned long len,
9360+ unsigned long flags);
9361+#endif
9362+#endif
9363+#endif
9364+
9365 #endif /* _ASM_X86_MMAN_H */
9366diff -urNp linux-2.6.32.48/arch/x86/include/asm/mmu_context.h linux-2.6.32.48/arch/x86/include/asm/mmu_context.h
9367--- linux-2.6.32.48/arch/x86/include/asm/mmu_context.h 2011-11-08 19:02:43.000000000 -0500
9368+++ linux-2.6.32.48/arch/x86/include/asm/mmu_context.h 2011-11-15 19:59:42.000000000 -0500
9369@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9370
9371 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9372 {
9373+
9374+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9375+ unsigned int i;
9376+ pgd_t *pgd;
9377+
9378+ pax_open_kernel();
9379+ pgd = get_cpu_pgd(smp_processor_id());
9380+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9381+ set_pgd_batched(pgd+i, native_make_pgd(0));
9382+ pax_close_kernel();
9383+#endif
9384+
9385 #ifdef CONFIG_SMP
9386 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9387 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9388@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9389 struct task_struct *tsk)
9390 {
9391 unsigned cpu = smp_processor_id();
9392+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9393+ int tlbstate = TLBSTATE_OK;
9394+#endif
9395
9396 if (likely(prev != next)) {
9397 #ifdef CONFIG_SMP
9398+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9399+ tlbstate = percpu_read(cpu_tlbstate.state);
9400+#endif
9401 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9402 percpu_write(cpu_tlbstate.active_mm, next);
9403 #endif
9404 cpumask_set_cpu(cpu, mm_cpumask(next));
9405
9406 /* Re-load page tables */
9407+#ifdef CONFIG_PAX_PER_CPU_PGD
9408+ pax_open_kernel();
9409+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9410+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9411+ pax_close_kernel();
9412+ load_cr3(get_cpu_pgd(cpu));
9413+#else
9414 load_cr3(next->pgd);
9415+#endif
9416
9417 /* stop flush ipis for the previous mm */
9418 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9419@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9420 */
9421 if (unlikely(prev->context.ldt != next->context.ldt))
9422 load_LDT_nolock(&next->context);
9423- }
9424+
9425+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9426+ if (!nx_enabled) {
9427+ smp_mb__before_clear_bit();
9428+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9429+ smp_mb__after_clear_bit();
9430+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9431+ }
9432+#endif
9433+
9434+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9435+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9436+ prev->context.user_cs_limit != next->context.user_cs_limit))
9437+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9438 #ifdef CONFIG_SMP
9439+ else if (unlikely(tlbstate != TLBSTATE_OK))
9440+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9441+#endif
9442+#endif
9443+
9444+ }
9445 else {
9446+
9447+#ifdef CONFIG_PAX_PER_CPU_PGD
9448+ pax_open_kernel();
9449+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9450+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9451+ pax_close_kernel();
9452+ load_cr3(get_cpu_pgd(cpu));
9453+#endif
9454+
9455+#ifdef CONFIG_SMP
9456 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9457 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9458
9459@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9460 * tlb flush IPI delivery. We must reload CR3
9461 * to make sure to use no freed page tables.
9462 */
9463+
9464+#ifndef CONFIG_PAX_PER_CPU_PGD
9465 load_cr3(next->pgd);
9466+#endif
9467+
9468 load_LDT_nolock(&next->context);
9469+
9470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9471+ if (!nx_enabled)
9472+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9473+#endif
9474+
9475+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9476+#ifdef CONFIG_PAX_PAGEEXEC
9477+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9478+#endif
9479+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9480+#endif
9481+
9482 }
9483- }
9484 #endif
9485+ }
9486 }
9487
9488 #define activate_mm(prev, next) \
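[note] The mmu_context.h changes above implement the per-CPU PGD scheme (PAX_PER_CPU_PGD / UDEREF): rather than loading the incoming mm's pgd into CR3, each CPU keeps a private top-level page table whose user half is copied from the mm and, on amd64 with UDEREF, mirrored into a shadow range, while enter_lazy_tlb() wipes those shadow slots so an idle CPU cannot reach user memory. A condensed sketch of the switch path, reusing the patch's own helper names (the helpers are defined elsewhere in the patch):

static inline void switch_mm_sketch(struct mm_struct *next, unsigned int cpu)
{
	pgd_t *cpu_pgd = get_cpu_pgd(cpu);	/* this CPU's private top-level table */

	pax_open_kernel();
	__clone_user_pgds(cpu_pgd, next->pgd, USER_PGD_PTRS);			/* lower half: user mappings */
	__shadow_user_pgds(cpu_pgd + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);	/* upper half: UDEREF shadow */
	pax_close_kernel();

	load_cr3(cpu_pgd);			/* kernel entries never leave this pgd */
}
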
9489diff -urNp linux-2.6.32.48/arch/x86/include/asm/mmu.h linux-2.6.32.48/arch/x86/include/asm/mmu.h
9490--- linux-2.6.32.48/arch/x86/include/asm/mmu.h 2011-11-08 19:02:43.000000000 -0500
9491+++ linux-2.6.32.48/arch/x86/include/asm/mmu.h 2011-11-15 19:59:42.000000000 -0500
9492@@ -9,10 +9,23 @@
9493 * we put the segment information here.
9494 */
9495 typedef struct {
9496- void *ldt;
9497+ struct desc_struct *ldt;
9498 int size;
9499 struct mutex lock;
9500- void *vdso;
9501+ unsigned long vdso;
9502+
9503+#ifdef CONFIG_X86_32
9504+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9505+ unsigned long user_cs_base;
9506+ unsigned long user_cs_limit;
9507+
9508+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9509+ cpumask_t cpu_user_cs_mask;
9510+#endif
9511+
9512+#endif
9513+#endif
9514+
9515 } mm_context_t;
9516
9517 #ifdef CONFIG_SMP
9518diff -urNp linux-2.6.32.48/arch/x86/include/asm/module.h linux-2.6.32.48/arch/x86/include/asm/module.h
9519--- linux-2.6.32.48/arch/x86/include/asm/module.h 2011-11-08 19:02:43.000000000 -0500
9520+++ linux-2.6.32.48/arch/x86/include/asm/module.h 2011-11-15 19:59:42.000000000 -0500
9521@@ -5,6 +5,7 @@
9522
9523 #ifdef CONFIG_X86_64
9524 /* X86_64 does not define MODULE_PROC_FAMILY */
9525+#define MODULE_PROC_FAMILY ""
9526 #elif defined CONFIG_M386
9527 #define MODULE_PROC_FAMILY "386 "
9528 #elif defined CONFIG_M486
9529@@ -59,13 +60,24 @@
9530 #error unknown processor family
9531 #endif
9532
9533-#ifdef CONFIG_X86_32
9534-# ifdef CONFIG_4KSTACKS
9535-# define MODULE_STACKSIZE "4KSTACKS "
9536-# else
9537-# define MODULE_STACKSIZE ""
9538-# endif
9539-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9540+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9541+#define MODULE_STACKSIZE "4KSTACKS "
9542+#else
9543+#define MODULE_STACKSIZE ""
9544+#endif
9545+
9546+#ifdef CONFIG_PAX_KERNEXEC
9547+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9548+#else
9549+#define MODULE_PAX_KERNEXEC ""
9550 #endif
9551
9552+#ifdef CONFIG_PAX_MEMORY_UDEREF
9553+#define MODULE_PAX_UDEREF "UDEREF "
9554+#else
9555+#define MODULE_PAX_UDEREF ""
9556+#endif
9557+
9558+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9559+
9560 #endif /* _ASM_X86_MODULE_H */
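[note] The net effect of the module.h changes is that the PaX feature set becomes part of the module vermagic, so modules built without matching KERNEXEC/UDEREF settings are refused at load time. As a worked example, on a 32-bit M686 build with 4K stacks and both features enabled the pieces concatenate as follows:

#define MODULE_PROC_FAMILY	"686 "
#define MODULE_STACKSIZE	"4KSTACKS "
#define MODULE_PAX_KERNEXEC	"KERNEXEC "
#define MODULE_PAX_UDEREF	"UDEREF "
/* MODULE_ARCH_VERMAGIC expands to the string "686 4KSTACKS KERNEXEC UDEREF " */
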
9561diff -urNp linux-2.6.32.48/arch/x86/include/asm/page_64_types.h linux-2.6.32.48/arch/x86/include/asm/page_64_types.h
9562--- linux-2.6.32.48/arch/x86/include/asm/page_64_types.h 2011-11-08 19:02:43.000000000 -0500
9563+++ linux-2.6.32.48/arch/x86/include/asm/page_64_types.h 2011-11-15 19:59:42.000000000 -0500
9564@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9565
9566 /* duplicated to the one in bootmem.h */
9567 extern unsigned long max_pfn;
9568-extern unsigned long phys_base;
9569+extern const unsigned long phys_base;
9570
9571 extern unsigned long __phys_addr(unsigned long);
9572 #define __phys_reloc_hide(x) (x)
9573diff -urNp linux-2.6.32.48/arch/x86/include/asm/paravirt.h linux-2.6.32.48/arch/x86/include/asm/paravirt.h
9574--- linux-2.6.32.48/arch/x86/include/asm/paravirt.h 2011-11-08 19:02:43.000000000 -0500
9575+++ linux-2.6.32.48/arch/x86/include/asm/paravirt.h 2011-11-15 19:59:42.000000000 -0500
9576@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9577 val);
9578 }
9579
9580+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9581+{
9582+ pgdval_t val = native_pgd_val(pgd);
9583+
9584+ if (sizeof(pgdval_t) > sizeof(long))
9585+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9586+ val, (u64)val >> 32);
9587+ else
9588+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9589+ val);
9590+}
9591+
9592 static inline void pgd_clear(pgd_t *pgdp)
9593 {
9594 set_pgd(pgdp, __pgd(0));
9595@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9596 pv_mmu_ops.set_fixmap(idx, phys, flags);
9597 }
9598
9599+#ifdef CONFIG_PAX_KERNEXEC
9600+static inline unsigned long pax_open_kernel(void)
9601+{
9602+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9603+}
9604+
9605+static inline unsigned long pax_close_kernel(void)
9606+{
9607+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9608+}
9609+#else
9610+static inline unsigned long pax_open_kernel(void) { return 0; }
9611+static inline unsigned long pax_close_kernel(void) { return 0; }
9612+#endif
9613+
9614 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9615
9616 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9617@@ -945,7 +972,7 @@ extern void default_banner(void);
9618
9619 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9620 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9621-#define PARA_INDIRECT(addr) *%cs:addr
9622+#define PARA_INDIRECT(addr) *%ss:addr
9623 #endif
9624
9625 #define INTERRUPT_RETURN \
9626@@ -1022,6 +1049,21 @@ extern void default_banner(void);
9627 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9628 CLBR_NONE, \
9629 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9630+
9631+#define GET_CR0_INTO_RDI \
9632+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9633+ mov %rax,%rdi
9634+
9635+#define SET_RDI_INTO_CR0 \
9636+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9637+
9638+#define GET_CR3_INTO_RDI \
9639+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9640+ mov %rax,%rdi
9641+
9642+#define SET_RDI_INTO_CR3 \
9643+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9644+
9645 #endif /* CONFIG_X86_32 */
9646
9647 #endif /* __ASSEMBLY__ */
9648diff -urNp linux-2.6.32.48/arch/x86/include/asm/paravirt_types.h linux-2.6.32.48/arch/x86/include/asm/paravirt_types.h
9649--- linux-2.6.32.48/arch/x86/include/asm/paravirt_types.h 2011-11-08 19:02:43.000000000 -0500
9650+++ linux-2.6.32.48/arch/x86/include/asm/paravirt_types.h 2011-11-15 19:59:42.000000000 -0500
9651@@ -78,19 +78,19 @@ struct pv_init_ops {
9652 */
9653 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9654 unsigned long addr, unsigned len);
9655-};
9656+} __no_const;
9657
9658
9659 struct pv_lazy_ops {
9660 /* Set deferred update mode, used for batching operations. */
9661 void (*enter)(void);
9662 void (*leave)(void);
9663-};
9664+} __no_const;
9665
9666 struct pv_time_ops {
9667 unsigned long long (*sched_clock)(void);
9668 unsigned long (*get_tsc_khz)(void);
9669-};
9670+} __no_const;
9671
9672 struct pv_cpu_ops {
9673 /* hooks for various privileged instructions */
9674@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9675
9676 void (*start_context_switch)(struct task_struct *prev);
9677 void (*end_context_switch)(struct task_struct *next);
9678-};
9679+} __no_const;
9680
9681 struct pv_irq_ops {
9682 /*
9683@@ -217,7 +217,7 @@ struct pv_apic_ops {
9684 unsigned long start_eip,
9685 unsigned long start_esp);
9686 #endif
9687-};
9688+} __no_const;
9689
9690 struct pv_mmu_ops {
9691 unsigned long (*read_cr2)(void);
9692@@ -301,6 +301,7 @@ struct pv_mmu_ops {
9693 struct paravirt_callee_save make_pud;
9694
9695 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9696+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9697 #endif /* PAGETABLE_LEVELS == 4 */
9698 #endif /* PAGETABLE_LEVELS >= 3 */
9699
9700@@ -316,6 +317,12 @@ struct pv_mmu_ops {
9701 an mfn. We can tell which is which from the index. */
9702 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9703 phys_addr_t phys, pgprot_t flags);
9704+
9705+#ifdef CONFIG_PAX_KERNEXEC
9706+ unsigned long (*pax_open_kernel)(void);
9707+ unsigned long (*pax_close_kernel)(void);
9708+#endif
9709+
9710 };
9711
9712 struct raw_spinlock;
9713@@ -326,7 +333,7 @@ struct pv_lock_ops {
9714 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9715 int (*spin_trylock)(struct raw_spinlock *lock);
9716 void (*spin_unlock)(struct raw_spinlock *lock);
9717-};
9718+} __no_const;
9719
9720 /* This contains all the paravirt structures: we get a convenient
9721 * number for each function using the offset which we use to indicate
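[note] Tagging these ops structures __no_const is the opt-out side of grsecurity's constification of function-pointer tables: such structures are normally forced into read-only memory at build time, and __no_const exempts a type whose instances must stay writable, as paravirt ops are rewritten during boot-time patching. Illustrative sketch only; the fallback definition and the example struct are stand-ins, not part of this hunk:

#ifndef __no_const
#define __no_const	/* becomes a plugin-recognised attribute when constification is enabled */
#endif

struct hook_ops_sketch {
	void (*enter)(void);
	void (*leave)(void);
} __no_const;		/* instances remain writable so boot code can repoint the hooks */
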
9722diff -urNp linux-2.6.32.48/arch/x86/include/asm/pci_x86.h linux-2.6.32.48/arch/x86/include/asm/pci_x86.h
9723--- linux-2.6.32.48/arch/x86/include/asm/pci_x86.h 2011-11-08 19:02:43.000000000 -0500
9724+++ linux-2.6.32.48/arch/x86/include/asm/pci_x86.h 2011-11-15 19:59:42.000000000 -0500
9725@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9726 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9727
9728 struct pci_raw_ops {
9729- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9730+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9731 int reg, int len, u32 *val);
9732- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9733+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9734 int reg, int len, u32 val);
9735 };
9736
9737-extern struct pci_raw_ops *raw_pci_ops;
9738-extern struct pci_raw_ops *raw_pci_ext_ops;
9739+extern const struct pci_raw_ops *raw_pci_ops;
9740+extern const struct pci_raw_ops *raw_pci_ext_ops;
9741
9742-extern struct pci_raw_ops pci_direct_conf1;
9743+extern const struct pci_raw_ops pci_direct_conf1;
9744 extern bool port_cf9_safe;
9745
9746 /* arch_initcall level */
9747diff -urNp linux-2.6.32.48/arch/x86/include/asm/percpu.h linux-2.6.32.48/arch/x86/include/asm/percpu.h
9748--- linux-2.6.32.48/arch/x86/include/asm/percpu.h 2011-11-08 19:02:43.000000000 -0500
9749+++ linux-2.6.32.48/arch/x86/include/asm/percpu.h 2011-11-15 19:59:42.000000000 -0500
9750@@ -78,6 +78,7 @@ do { \
9751 if (0) { \
9752 T__ tmp__; \
9753 tmp__ = (val); \
9754+ (void)tmp__; \
9755 } \
9756 switch (sizeof(var)) { \
9757 case 1: \
9758diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgalloc.h linux-2.6.32.48/arch/x86/include/asm/pgalloc.h
9759--- linux-2.6.32.48/arch/x86/include/asm/pgalloc.h 2011-11-08 19:02:43.000000000 -0500
9760+++ linux-2.6.32.48/arch/x86/include/asm/pgalloc.h 2011-11-15 19:59:42.000000000 -0500
9761@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9762 pmd_t *pmd, pte_t *pte)
9763 {
9764 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9765+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9766+}
9767+
9768+static inline void pmd_populate_user(struct mm_struct *mm,
9769+ pmd_t *pmd, pte_t *pte)
9770+{
9771+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9772 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9773 }
9774
9775diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.48/arch/x86/include/asm/pgtable-2level.h
9776--- linux-2.6.32.48/arch/x86/include/asm/pgtable-2level.h 2011-11-08 19:02:43.000000000 -0500
9777+++ linux-2.6.32.48/arch/x86/include/asm/pgtable-2level.h 2011-11-15 19:59:42.000000000 -0500
9778@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9779
9780 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9781 {
9782+ pax_open_kernel();
9783 *pmdp = pmd;
9784+ pax_close_kernel();
9785 }
9786
9787 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9788diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable_32.h linux-2.6.32.48/arch/x86/include/asm/pgtable_32.h
9789--- linux-2.6.32.48/arch/x86/include/asm/pgtable_32.h 2011-11-08 19:02:43.000000000 -0500
9790+++ linux-2.6.32.48/arch/x86/include/asm/pgtable_32.h 2011-11-15 19:59:42.000000000 -0500
9791@@ -26,9 +26,6 @@
9792 struct mm_struct;
9793 struct vm_area_struct;
9794
9795-extern pgd_t swapper_pg_dir[1024];
9796-extern pgd_t trampoline_pg_dir[1024];
9797-
9798 static inline void pgtable_cache_init(void) { }
9799 static inline void check_pgt_cache(void) { }
9800 void paging_init(void);
9801@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9802 # include <asm/pgtable-2level.h>
9803 #endif
9804
9805+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9806+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9807+#ifdef CONFIG_X86_PAE
9808+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9809+#endif
9810+
9811 #if defined(CONFIG_HIGHPTE)
9812 #define __KM_PTE \
9813 (in_nmi() ? KM_NMI_PTE : \
9814@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9815 /* Clear a kernel PTE and flush it from the TLB */
9816 #define kpte_clear_flush(ptep, vaddr) \
9817 do { \
9818+ pax_open_kernel(); \
9819 pte_clear(&init_mm, (vaddr), (ptep)); \
9820+ pax_close_kernel(); \
9821 __flush_tlb_one((vaddr)); \
9822 } while (0)
9823
9824@@ -85,6 +90,9 @@ do { \
9825
9826 #endif /* !__ASSEMBLY__ */
9827
9828+#define HAVE_ARCH_UNMAPPED_AREA
9829+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9830+
9831 /*
9832 * kern_addr_valid() is (1) for FLATMEM and (0) for
9833 * SPARSEMEM and DISCONTIGMEM
9834diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.48/arch/x86/include/asm/pgtable_32_types.h
9835--- linux-2.6.32.48/arch/x86/include/asm/pgtable_32_types.h 2011-11-08 19:02:43.000000000 -0500
9836+++ linux-2.6.32.48/arch/x86/include/asm/pgtable_32_types.h 2011-11-15 19:59:42.000000000 -0500
9837@@ -8,7 +8,7 @@
9838 */
9839 #ifdef CONFIG_X86_PAE
9840 # include <asm/pgtable-3level_types.h>
9841-# define PMD_SIZE (1UL << PMD_SHIFT)
9842+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9843 # define PMD_MASK (~(PMD_SIZE - 1))
9844 #else
9845 # include <asm/pgtable-2level_types.h>
9846@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9847 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9848 #endif
9849
9850+#ifdef CONFIG_PAX_KERNEXEC
9851+#ifndef __ASSEMBLY__
9852+extern unsigned char MODULES_EXEC_VADDR[];
9853+extern unsigned char MODULES_EXEC_END[];
9854+#endif
9855+#include <asm/boot.h>
9856+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9857+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9858+#else
9859+#define ktla_ktva(addr) (addr)
9860+#define ktva_ktla(addr) (addr)
9861+#endif
9862+
9863 #define MODULES_VADDR VMALLOC_START
9864 #define MODULES_END VMALLOC_END
9865 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
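[note] Under KERNEXEC on i386 the kernel text ends up with two mappings, and ktla_ktva()/ktva_ktla() translate between a kernel text linear address and its alias by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A self-contained arithmetic check; the constants are assumed example values (PAGE_OFFSET 0xC0000000, LOAD_PHYSICAL_ADDR 0x01000000), not values fixed by this patch:

#include <assert.h>

#define PAGE_OFFSET_SK		0xC0000000UL
#define LOAD_PHYSICAL_ADDR_SK	0x01000000UL
#define ktla_ktva_sk(addr)	((addr) + LOAD_PHYSICAL_ADDR_SK + PAGE_OFFSET_SK)
#define ktva_ktla_sk(addr)	((addr) - LOAD_PHYSICAL_ADDR_SK - PAGE_OFFSET_SK)

int main(void)
{
	unsigned long text = 0x00400000UL;			/* some kernel-text linear address */
	assert(ktla_ktva_sk(text) == 0xC1400000UL);		/* its alias in the high mapping */
	assert(ktva_ktla_sk(ktla_ktva_sk(text)) == text);	/* the two macros are inverses */
	return 0;
}
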
9866diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.48/arch/x86/include/asm/pgtable-3level.h
9867--- linux-2.6.32.48/arch/x86/include/asm/pgtable-3level.h 2011-11-08 19:02:43.000000000 -0500
9868+++ linux-2.6.32.48/arch/x86/include/asm/pgtable-3level.h 2011-11-15 19:59:42.000000000 -0500
9869@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9870
9871 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9872 {
9873+ pax_open_kernel();
9874 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9875+ pax_close_kernel();
9876 }
9877
9878 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9879 {
9880+ pax_open_kernel();
9881 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9882+ pax_close_kernel();
9883 }
9884
9885 /*
9886diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable_64.h linux-2.6.32.48/arch/x86/include/asm/pgtable_64.h
9887--- linux-2.6.32.48/arch/x86/include/asm/pgtable_64.h 2011-11-08 19:02:43.000000000 -0500
9888+++ linux-2.6.32.48/arch/x86/include/asm/pgtable_64.h 2011-11-15 19:59:42.000000000 -0500
9889@@ -16,10 +16,13 @@
9890
9891 extern pud_t level3_kernel_pgt[512];
9892 extern pud_t level3_ident_pgt[512];
9893+extern pud_t level3_vmalloc_pgt[512];
9894+extern pud_t level3_vmemmap_pgt[512];
9895+extern pud_t level2_vmemmap_pgt[512];
9896 extern pmd_t level2_kernel_pgt[512];
9897 extern pmd_t level2_fixmap_pgt[512];
9898-extern pmd_t level2_ident_pgt[512];
9899-extern pgd_t init_level4_pgt[];
9900+extern pmd_t level2_ident_pgt[512*2];
9901+extern pgd_t init_level4_pgt[512];
9902
9903 #define swapper_pg_dir init_level4_pgt
9904
9905@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9906
9907 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9908 {
9909+ pax_open_kernel();
9910 *pmdp = pmd;
9911+ pax_close_kernel();
9912 }
9913
9914 static inline void native_pmd_clear(pmd_t *pmd)
9915@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9916
9917 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9918 {
9919+ pax_open_kernel();
9920+ *pgdp = pgd;
9921+ pax_close_kernel();
9922+}
9923+
9924+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9925+{
9926 *pgdp = pgd;
9927 }
9928
9929diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.48/arch/x86/include/asm/pgtable_64_types.h
9930--- linux-2.6.32.48/arch/x86/include/asm/pgtable_64_types.h 2011-11-08 19:02:43.000000000 -0500
9931+++ linux-2.6.32.48/arch/x86/include/asm/pgtable_64_types.h 2011-11-15 19:59:42.000000000 -0500
9932@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9933 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9934 #define MODULES_END _AC(0xffffffffff000000, UL)
9935 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9936+#define MODULES_EXEC_VADDR MODULES_VADDR
9937+#define MODULES_EXEC_END MODULES_END
9938+
9939+#define ktla_ktva(addr) (addr)
9940+#define ktva_ktla(addr) (addr)
9941
9942 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9943diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable.h linux-2.6.32.48/arch/x86/include/asm/pgtable.h
9944--- linux-2.6.32.48/arch/x86/include/asm/pgtable.h 2011-11-08 19:02:43.000000000 -0500
9945+++ linux-2.6.32.48/arch/x86/include/asm/pgtable.h 2011-11-15 19:59:42.000000000 -0500
9946@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9947
9948 #ifndef __PAGETABLE_PUD_FOLDED
9949 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9950+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9951 #define pgd_clear(pgd) native_pgd_clear(pgd)
9952 #endif
9953
9954@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9955
9956 #define arch_end_context_switch(prev) do {} while(0)
9957
9958+#define pax_open_kernel() native_pax_open_kernel()
9959+#define pax_close_kernel() native_pax_close_kernel()
9960 #endif /* CONFIG_PARAVIRT */
9961
9962+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9963+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9964+
9965+#ifdef CONFIG_PAX_KERNEXEC
9966+static inline unsigned long native_pax_open_kernel(void)
9967+{
9968+ unsigned long cr0;
9969+
9970+ preempt_disable();
9971+ barrier();
9972+ cr0 = read_cr0() ^ X86_CR0_WP;
9973+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9974+ write_cr0(cr0);
9975+ return cr0 ^ X86_CR0_WP;
9976+}
9977+
9978+static inline unsigned long native_pax_close_kernel(void)
9979+{
9980+ unsigned long cr0;
9981+
9982+ cr0 = read_cr0() ^ X86_CR0_WP;
9983+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9984+ write_cr0(cr0);
9985+ barrier();
9986+ preempt_enable_no_resched();
9987+ return cr0 ^ X86_CR0_WP;
9988+}
9989+#else
9990+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9991+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9992+#endif
9993+
9994 /*
9995 * The following only work if pte_present() is true.
9996 * Undefined behaviour if not..
9997 */
9998+static inline int pte_user(pte_t pte)
9999+{
10000+ return pte_val(pte) & _PAGE_USER;
10001+}
10002+
10003 static inline int pte_dirty(pte_t pte)
10004 {
10005 return pte_flags(pte) & _PAGE_DIRTY;
10006@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
10007 return pte_clear_flags(pte, _PAGE_RW);
10008 }
10009
10010+static inline pte_t pte_mkread(pte_t pte)
10011+{
10012+ return __pte(pte_val(pte) | _PAGE_USER);
10013+}
10014+
10015 static inline pte_t pte_mkexec(pte_t pte)
10016 {
10017- return pte_clear_flags(pte, _PAGE_NX);
10018+#ifdef CONFIG_X86_PAE
10019+ if (__supported_pte_mask & _PAGE_NX)
10020+ return pte_clear_flags(pte, _PAGE_NX);
10021+ else
10022+#endif
10023+ return pte_set_flags(pte, _PAGE_USER);
10024+}
10025+
10026+static inline pte_t pte_exprotect(pte_t pte)
10027+{
10028+#ifdef CONFIG_X86_PAE
10029+ if (__supported_pte_mask & _PAGE_NX)
10030+ return pte_set_flags(pte, _PAGE_NX);
10031+ else
10032+#endif
10033+ return pte_clear_flags(pte, _PAGE_USER);
10034 }
10035
10036 static inline pte_t pte_mkdirty(pte_t pte)
10037@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
10038 #endif
10039
10040 #ifndef __ASSEMBLY__
10041+
10042+#ifdef CONFIG_PAX_PER_CPU_PGD
10043+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10044+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10045+{
10046+ return cpu_pgd[cpu];
10047+}
10048+#endif
10049+
10050 #include <linux/mm_types.h>
10051
10052 static inline int pte_none(pte_t pte)
10053@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
10054
10055 static inline int pgd_bad(pgd_t pgd)
10056 {
10057- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10058+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10059 }
10060
10061 static inline int pgd_none(pgd_t pgd)
10062@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
10063 * pgd_offset() returns a (pgd_t *)
10064 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10065 */
10066-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10067+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10068+
10069+#ifdef CONFIG_PAX_PER_CPU_PGD
10070+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10071+#endif
10072+
10073 /*
10074 * a shortcut which implies the use of the kernel's pgd, instead
10075 * of a process's
10076@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
10077 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10078 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10079
10080+#ifdef CONFIG_X86_32
10081+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10082+#else
10083+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10084+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10085+
10086+#ifdef CONFIG_PAX_MEMORY_UDEREF
10087+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10088+#else
10089+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10090+#endif
10091+
10092+#endif
10093+
10094 #ifndef __ASSEMBLY__
10095
10096 extern int direct_gbpages;
10097@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
10098 * dst and src can be on the same page, but the range must not overlap,
10099 * and must not cross a page boundary.
10100 */
10101-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10102+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10103 {
10104- memcpy(dst, src, count * sizeof(pgd_t));
10105+ pax_open_kernel();
10106+ while (count--)
10107+ *dst++ = *src++;
10108+ pax_close_kernel();
10109 }
10110
10111+#ifdef CONFIG_PAX_PER_CPU_PGD
10112+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10113+#endif
10114+
10115+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10116+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10117+#else
10118+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10119+#endif
10120
10121 #include <asm-generic/pgtable.h>
10122 #endif /* __ASSEMBLY__ */
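[note] native_pax_open_kernel()/native_pax_close_kernel() above form the KERNEXEC write window: open disables preemption and clears CR0.WP so writes may go through read-only kernel mappings, close restores the bit and re-enables preemption, and the BUG_ON()s catch unbalanced use. A minimal usage sketch; rodata_word is a hypothetical write target, not something defined by this patch:

static unsigned long rodata_word;	/* stand-in for data living in a read-only mapping */

static void patch_rodata_sketch(unsigned long val)
{
	pax_open_kernel();	/* preemption off, CR0.WP cleared */
	rodata_word = val;	/* write that would otherwise fault */
	pax_close_kernel();	/* CR0.WP restored, preemption back on */
}
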
10123diff -urNp linux-2.6.32.48/arch/x86/include/asm/pgtable_types.h linux-2.6.32.48/arch/x86/include/asm/pgtable_types.h
10124--- linux-2.6.32.48/arch/x86/include/asm/pgtable_types.h 2011-11-08 19:02:43.000000000 -0500
10125+++ linux-2.6.32.48/arch/x86/include/asm/pgtable_types.h 2011-11-15 19:59:42.000000000 -0500
10126@@ -16,12 +16,11 @@
10127 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
10128 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
10129 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
10130-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
10131+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
10132 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
10133 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
10134 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
10135-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
10136-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
10137+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
10138 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
10139
10140 /* If _PAGE_BIT_PRESENT is clear, we use these: */
10141@@ -39,7 +38,6 @@
10142 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
10143 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
10144 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
10145-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
10146 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
10147 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
10148 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
10149@@ -55,8 +53,10 @@
10150
10151 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
10152 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
10153-#else
10154+#elif defined(CONFIG_KMEMCHECK)
10155 #define _PAGE_NX (_AT(pteval_t, 0))
10156+#else
10157+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
10158 #endif
10159
10160 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
10161@@ -93,6 +93,9 @@
10162 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
10163 _PAGE_ACCESSED)
10164
10165+#define PAGE_READONLY_NOEXEC PAGE_READONLY
10166+#define PAGE_SHARED_NOEXEC PAGE_SHARED
10167+
10168 #define __PAGE_KERNEL_EXEC \
10169 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
10170 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
10171@@ -103,8 +106,8 @@
10172 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
10173 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
10174 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
10175-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
10176-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
10177+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
10178+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
10179 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
10180 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
10181 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
10182@@ -163,8 +166,8 @@
10183 * bits are combined, this will alow user to access the high address mapped
10184 * VDSO in the presence of CONFIG_COMPAT_VDSO
10185 */
10186-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
10187-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
10188+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
10189+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
10190 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
10191 #endif
10192
10193@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
10194 {
10195 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
10196 }
10197+#endif
10198
10199+#if PAGETABLE_LEVELS == 3
10200+#include <asm-generic/pgtable-nopud.h>
10201+#endif
10202+
10203+#if PAGETABLE_LEVELS == 2
10204+#include <asm-generic/pgtable-nopmd.h>
10205+#endif
10206+
10207+#ifndef __ASSEMBLY__
10208 #if PAGETABLE_LEVELS > 3
10209 typedef struct { pudval_t pud; } pud_t;
10210
10211@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
10212 return pud.pud;
10213 }
10214 #else
10215-#include <asm-generic/pgtable-nopud.h>
10216-
10217 static inline pudval_t native_pud_val(pud_t pud)
10218 {
10219 return native_pgd_val(pud.pgd);
10220@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
10221 return pmd.pmd;
10222 }
10223 #else
10224-#include <asm-generic/pgtable-nopmd.h>
10225-
10226 static inline pmdval_t native_pmd_val(pmd_t pmd)
10227 {
10228 return native_pgd_val(pmd.pud.pgd);
10229@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
10230
10231 extern pteval_t __supported_pte_mask;
10232 extern void set_nx(void);
10233+
10234+#ifdef CONFIG_X86_32
10235+#ifdef CONFIG_X86_PAE
10236 extern int nx_enabled;
10237+#else
10238+#define nx_enabled (0)
10239+#endif
10240+#else
10241+#define nx_enabled (1)
10242+#endif
10243
10244 #define pgprot_writecombine pgprot_writecombine
10245 extern pgprot_t pgprot_writecombine(pgprot_t prot);
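[note] The _PAGE_NX ladder above keeps the no-execute flag usable as a page-table bit in every configuration; summarising the definitions above:

/* Resulting _PAGE_NX per configuration (from the #if ladder in this hunk):
 *   CONFIG_X86_64 or CONFIG_X86_PAE : 1ULL << 63   (hardware NX bit)
 *   CONFIG_KMEMCHECK                : 0            (bit 11 is already _PAGE_HIDDEN)
 *   plain 32-bit, no PAE            : 1 << 11      (software-only marker for PAGEEXEC)
 * nx_enabled is likewise forced to 0 on non-PAE 32-bit and to 1 on 64-bit. */
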
10246diff -urNp linux-2.6.32.48/arch/x86/include/asm/processor.h linux-2.6.32.48/arch/x86/include/asm/processor.h
10247--- linux-2.6.32.48/arch/x86/include/asm/processor.h 2011-11-08 19:02:43.000000000 -0500
10248+++ linux-2.6.32.48/arch/x86/include/asm/processor.h 2011-11-15 19:59:42.000000000 -0500
10249@@ -272,7 +272,7 @@ struct tss_struct {
10250
10251 } ____cacheline_aligned;
10252
10253-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10254+extern struct tss_struct init_tss[NR_CPUS];
10255
10256 /*
10257 * Save the original ist values for checking stack pointers during debugging
10258@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(co
10259 */
10260 #define TASK_SIZE PAGE_OFFSET
10261 #define TASK_SIZE_MAX TASK_SIZE
10262+
10263+#ifdef CONFIG_PAX_SEGMEXEC
10264+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10265+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10266+#else
10267 #define STACK_TOP TASK_SIZE
10268-#define STACK_TOP_MAX STACK_TOP
10269+#endif
10270+
10271+#define STACK_TOP_MAX TASK_SIZE
10272
10273 #define INIT_THREAD { \
10274- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10275+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10276 .vm86_info = NULL, \
10277 .sysenter_cs = __KERNEL_CS, \
10278 .io_bitmap_ptr = NULL, \
10279@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(co
10280 */
10281 #define INIT_TSS { \
10282 .x86_tss = { \
10283- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10284+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10285 .ss0 = __KERNEL_DS, \
10286 .ss1 = __KERNEL_CS, \
10287 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10288@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(co
10289 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10290
10291 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10292-#define KSTK_TOP(info) \
10293-({ \
10294- unsigned long *__ptr = (unsigned long *)(info); \
10295- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10296-})
10297+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10298
10299 /*
10300 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10301@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(str
10302 #define task_pt_regs(task) \
10303 ({ \
10304 struct pt_regs *__regs__; \
10305- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10306+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10307 __regs__ - 1; \
10308 })
10309
10310@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(str
10311 /*
10312 * User space process size. 47bits minus one guard page.
10313 */
10314-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10315+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10316
10317 /* This decides where the kernel will search for a free chunk of vm
10318 * space during mmap's.
10319 */
10320 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10321- 0xc0000000 : 0xFFFFe000)
10322+ 0xc0000000 : 0xFFFFf000)
10323
10324 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10325 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10326@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(str
10327 #define STACK_TOP_MAX TASK_SIZE_MAX
10328
10329 #define INIT_THREAD { \
10330- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10331+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10332 }
10333
10334 #define INIT_TSS { \
10335- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10336+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10337 }
10338
10339 /*
10340@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs
10341 */
10342 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10343
10344+#ifdef CONFIG_PAX_SEGMEXEC
10345+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10346+#endif
10347+
10348 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10349
10350 /* Get/set a process' ability to use the timestamp counter instruction */
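[note] The SEGMEXEC additions halve the 32-bit user address space so the upper half can serve as the execute-only mirror of the lower half. A worked example with the common 3 GiB / 1 GiB split (TASK_SIZE = PAGE_OFFSET = 0xC0000000 is an assumed default, not a value set by this hunk):

/* TASK_SIZE                   = 0xC0000000                        (3 GiB)
 * SEGMEXEC_TASK_SIZE          = TASK_SIZE / 2      = 0x60000000   (1.5 GiB data half)
 * STACK_TOP (SEGMEXEC task)   = SEGMEXEC_TASK_SIZE = 0x60000000
 * SEGMEXEC_TASK_UNMAPPED_BASE = PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3) = 0x20000000
 * The 0x60000000..0xC0000000 half mirrors the lower half for executable mappings. */
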
10351diff -urNp linux-2.6.32.48/arch/x86/include/asm/ptrace.h linux-2.6.32.48/arch/x86/include/asm/ptrace.h
10352--- linux-2.6.32.48/arch/x86/include/asm/ptrace.h 2011-11-08 19:02:43.000000000 -0500
10353+++ linux-2.6.32.48/arch/x86/include/asm/ptrace.h 2011-11-15 19:59:42.000000000 -0500
10354@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10355 }
10356
10357 /*
10358- * user_mode_vm(regs) determines whether a register set came from user mode.
10359+ * user_mode(regs) determines whether a register set came from user mode.
10360 * This is true if V8086 mode was enabled OR if the register set was from
10361 * protected mode with RPL-3 CS value. This tricky test checks that with
10362 * one comparison. Many places in the kernel can bypass this full check
10363- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10364+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10365+ * be used.
10366 */
10367-static inline int user_mode(struct pt_regs *regs)
10368+static inline int user_mode_novm(struct pt_regs *regs)
10369 {
10370 #ifdef CONFIG_X86_32
10371 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10372 #else
10373- return !!(regs->cs & 3);
10374+ return !!(regs->cs & SEGMENT_RPL_MASK);
10375 #endif
10376 }
10377
10378-static inline int user_mode_vm(struct pt_regs *regs)
10379+static inline int user_mode(struct pt_regs *regs)
10380 {
10381 #ifdef CONFIG_X86_32
10382 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10383 USER_RPL;
10384 #else
10385- return user_mode(regs);
10386+ return user_mode_novm(regs);
10387 #endif
10388 }
10389
10390diff -urNp linux-2.6.32.48/arch/x86/include/asm/reboot.h linux-2.6.32.48/arch/x86/include/asm/reboot.h
10391--- linux-2.6.32.48/arch/x86/include/asm/reboot.h 2011-11-08 19:02:43.000000000 -0500
10392+++ linux-2.6.32.48/arch/x86/include/asm/reboot.h 2011-11-15 19:59:42.000000000 -0500
10393@@ -6,19 +6,19 @@
10394 struct pt_regs;
10395
10396 struct machine_ops {
10397- void (*restart)(char *cmd);
10398- void (*halt)(void);
10399- void (*power_off)(void);
10400+ void (* __noreturn restart)(char *cmd);
10401+ void (* __noreturn halt)(void);
10402+ void (* __noreturn power_off)(void);
10403 void (*shutdown)(void);
10404 void (*crash_shutdown)(struct pt_regs *);
10405- void (*emergency_restart)(void);
10406-};
10407+ void (* __noreturn emergency_restart)(void);
10408+} __no_const;
10409
10410 extern struct machine_ops machine_ops;
10411
10412 void native_machine_crash_shutdown(struct pt_regs *regs);
10413 void native_machine_shutdown(void);
10414-void machine_real_restart(const unsigned char *code, int length);
10415+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10416
10417 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10418 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10419diff -urNp linux-2.6.32.48/arch/x86/include/asm/rwsem.h linux-2.6.32.48/arch/x86/include/asm/rwsem.h
10420--- linux-2.6.32.48/arch/x86/include/asm/rwsem.h 2011-11-08 19:02:43.000000000 -0500
10421+++ linux-2.6.32.48/arch/x86/include/asm/rwsem.h 2011-11-15 19:59:42.000000000 -0500
10422@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10423 {
10424 asm volatile("# beginning down_read\n\t"
10425 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10426+
10427+#ifdef CONFIG_PAX_REFCOUNT
10428+ "jno 0f\n"
10429+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10430+ "int $4\n0:\n"
10431+ _ASM_EXTABLE(0b, 0b)
10432+#endif
10433+
10434 /* adds 0x00000001, returns the old value */
10435 " jns 1f\n"
10436 " call call_rwsem_down_read_failed\n"
10437@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10438 "1:\n\t"
10439 " mov %1,%2\n\t"
10440 " add %3,%2\n\t"
10441+
10442+#ifdef CONFIG_PAX_REFCOUNT
10443+ "jno 0f\n"
10444+ "sub %3,%2\n"
10445+ "int $4\n0:\n"
10446+ _ASM_EXTABLE(0b, 0b)
10447+#endif
10448+
10449 " jle 2f\n\t"
10450 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10451 " jnz 1b\n\t"
10452@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10453 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10454 asm volatile("# beginning down_write\n\t"
10455 LOCK_PREFIX " xadd %1,(%2)\n\t"
10456+
10457+#ifdef CONFIG_PAX_REFCOUNT
10458+ "jno 0f\n"
10459+ "mov %1,(%2)\n"
10460+ "int $4\n0:\n"
10461+ _ASM_EXTABLE(0b, 0b)
10462+#endif
10463+
10464 /* subtract 0x0000ffff, returns the old value */
10465 " test %1,%1\n\t"
10466 /* was the count 0 before? */
10467@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10468 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10469 asm volatile("# beginning __up_read\n\t"
10470 LOCK_PREFIX " xadd %1,(%2)\n\t"
10471+
10472+#ifdef CONFIG_PAX_REFCOUNT
10473+ "jno 0f\n"
10474+ "mov %1,(%2)\n"
10475+ "int $4\n0:\n"
10476+ _ASM_EXTABLE(0b, 0b)
10477+#endif
10478+
10479 /* subtracts 1, returns the old value */
10480 " jns 1f\n\t"
10481 " call call_rwsem_wake\n"
10482@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10483 rwsem_count_t tmp;
10484 asm volatile("# beginning __up_write\n\t"
10485 LOCK_PREFIX " xadd %1,(%2)\n\t"
10486+
10487+#ifdef CONFIG_PAX_REFCOUNT
10488+ "jno 0f\n"
10489+ "mov %1,(%2)\n"
10490+ "int $4\n0:\n"
10491+ _ASM_EXTABLE(0b, 0b)
10492+#endif
10493+
10494 /* tries to transition
10495 0xffff0001 -> 0x00000000 */
10496 " jz 1f\n"
10497@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10498 {
10499 asm volatile("# beginning __downgrade_write\n\t"
10500 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10501+
10502+#ifdef CONFIG_PAX_REFCOUNT
10503+ "jno 0f\n"
10504+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10505+ "int $4\n0:\n"
10506+ _ASM_EXTABLE(0b, 0b)
10507+#endif
10508+
10509 /*
10510 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10511 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10512@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10513 static inline void rwsem_atomic_add(rwsem_count_t delta,
10514 struct rw_semaphore *sem)
10515 {
10516- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10517+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10518+
10519+#ifdef CONFIG_PAX_REFCOUNT
10520+ "jno 0f\n"
10521+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10522+ "int $4\n0:\n"
10523+ _ASM_EXTABLE(0b, 0b)
10524+#endif
10525+
10526 : "+m" (sem->count)
10527 : "er" (delta));
10528 }
10529@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10530 {
10531 rwsem_count_t tmp = delta;
10532
10533- asm volatile(LOCK_PREFIX "xadd %0,%1"
10534+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10535+
10536+#ifdef CONFIG_PAX_REFCOUNT
10537+ "jno 0f\n"
10538+ "mov %0,%1\n"
10539+ "int $4\n0:\n"
10540+ _ASM_EXTABLE(0b, 0b)
10541+#endif
10542+
10543 : "+r" (tmp), "+m" (sem->count)
10544 : : "memory");
10545
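[note] Every rwsem primitive above gains the same PAX_REFCOUNT guard: do the locked arithmetic, and if the signed result overflowed, undo it and execute int $4 so the #OF handler can treat it as a reference-count overflow; the _ASM_EXTABLE entries make the trapping instruction resumable. A stand-alone sketch of the pattern on a plain counter, illustrative only and without the exception-table bookkeeping:

static inline void refcount_inc_sketch(int *counter)
{
	asm volatile("lock incl %0\n\t"
		     "jno 0f\n\t"		/* OF clear: normal fast path */
		     "lock decl %0\n\t"		/* overflow: undo the increment */
		     "int $4\n"			/* raise #OF for the kernel handler */
		     "0:\n"
		     : "+m" (*counter)
		     :
		     : "memory", "cc");
}
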
10546diff -urNp linux-2.6.32.48/arch/x86/include/asm/segment.h linux-2.6.32.48/arch/x86/include/asm/segment.h
10547--- linux-2.6.32.48/arch/x86/include/asm/segment.h 2011-11-08 19:02:43.000000000 -0500
10548+++ linux-2.6.32.48/arch/x86/include/asm/segment.h 2011-11-15 19:59:42.000000000 -0500
10549@@ -62,10 +62,15 @@
10550 * 26 - ESPFIX small SS
10551 * 27 - per-cpu [ offset to per-cpu data area ]
10552 * 28 - stack_canary-20 [ for stack protector ]
10553- * 29 - unused
10554- * 30 - unused
10555+ * 29 - PCI BIOS CS
10556+ * 30 - PCI BIOS DS
10557 * 31 - TSS for double fault handler
10558 */
10559+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10560+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10561+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10562+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10563+
10564 #define GDT_ENTRY_TLS_MIN 6
10565 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10566
10567@@ -77,6 +82,8 @@
10568
10569 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10570
10571+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10572+
10573 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10574
10575 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10576@@ -88,7 +95,7 @@
10577 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10578 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10579
10580-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10581+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10582 #ifdef CONFIG_SMP
10583 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10584 #else
10585@@ -102,6 +109,12 @@
10586 #define __KERNEL_STACK_CANARY 0
10587 #endif
10588
10589+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10590+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10591+
10592+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10593+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10594+
10595 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10596
10597 /*
10598@@ -139,7 +152,7 @@
10599 */
10600
10601 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10602-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10603+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10604
10605
10606 #else
10607@@ -163,6 +176,8 @@
10608 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10609 #define __USER32_DS __USER_DS
10610
10611+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10612+
10613 #define GDT_ENTRY_TSS 8 /* needs two entries */
10614 #define GDT_ENTRY_LDT 10 /* needs two entries */
10615 #define GDT_ENTRY_TLS_MIN 12
10616@@ -183,6 +198,7 @@
10617 #endif
10618
10619 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10620+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10621 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10622 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10623 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10624diff -urNp linux-2.6.32.48/arch/x86/include/asm/smp.h linux-2.6.32.48/arch/x86/include/asm/smp.h
10625--- linux-2.6.32.48/arch/x86/include/asm/smp.h 2011-11-08 19:02:43.000000000 -0500
10626+++ linux-2.6.32.48/arch/x86/include/asm/smp.h 2011-11-15 19:59:42.000000000 -0500
10627@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10628 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10629 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10630 DECLARE_PER_CPU(u16, cpu_llc_id);
10631-DECLARE_PER_CPU(int, cpu_number);
10632+DECLARE_PER_CPU(unsigned int, cpu_number);
10633
10634 static inline struct cpumask *cpu_sibling_mask(int cpu)
10635 {
10636@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10637 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10638
10639 /* Static state in head.S used to set up a CPU */
10640-extern struct {
10641- void *sp;
10642- unsigned short ss;
10643-} stack_start;
10644+extern unsigned long stack_start; /* Initial stack pointer address */
10645
10646 struct smp_ops {
10647 void (*smp_prepare_boot_cpu)(void);
10648@@ -60,7 +57,7 @@ struct smp_ops {
10649
10650 void (*send_call_func_ipi)(const struct cpumask *mask);
10651 void (*send_call_func_single_ipi)(int cpu);
10652-};
10653+} __no_const;
10654
10655 /* Globals due to paravirt */
10656 extern void set_cpu_sibling_map(int cpu);
10657@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10658 extern int safe_smp_processor_id(void);
10659
10660 #elif defined(CONFIG_X86_64_SMP)
10661-#define raw_smp_processor_id() (percpu_read(cpu_number))
10662-
10663-#define stack_smp_processor_id() \
10664-({ \
10665- struct thread_info *ti; \
10666- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10667- ti->cpu; \
10668-})
10669+#define raw_smp_processor_id() (percpu_read(cpu_number))
10670+#define stack_smp_processor_id() raw_smp_processor_id()
10671 #define safe_smp_processor_id() smp_processor_id()
10672
10673 #endif
10674diff -urNp linux-2.6.32.48/arch/x86/include/asm/spinlock.h linux-2.6.32.48/arch/x86/include/asm/spinlock.h
10675--- linux-2.6.32.48/arch/x86/include/asm/spinlock.h 2011-11-08 19:02:43.000000000 -0500
10676+++ linux-2.6.32.48/arch/x86/include/asm/spinlock.h 2011-11-15 19:59:42.000000000 -0500
10677@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10678 static inline void __raw_read_lock(raw_rwlock_t *rw)
10679 {
10680 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10681+
10682+#ifdef CONFIG_PAX_REFCOUNT
10683+ "jno 0f\n"
10684+ LOCK_PREFIX " addl $1,(%0)\n"
10685+ "int $4\n0:\n"
10686+ _ASM_EXTABLE(0b, 0b)
10687+#endif
10688+
10689 "jns 1f\n"
10690 "call __read_lock_failed\n\t"
10691 "1:\n"
10692@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10693 static inline void __raw_write_lock(raw_rwlock_t *rw)
10694 {
10695 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10696+
10697+#ifdef CONFIG_PAX_REFCOUNT
10698+ "jno 0f\n"
10699+ LOCK_PREFIX " addl %1,(%0)\n"
10700+ "int $4\n0:\n"
10701+ _ASM_EXTABLE(0b, 0b)
10702+#endif
10703+
10704 "jz 1f\n"
10705 "call __write_lock_failed\n\t"
10706 "1:\n"
10707@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10708
10709 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10710 {
10711- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10712+ asm volatile(LOCK_PREFIX "incl %0\n"
10713+
10714+#ifdef CONFIG_PAX_REFCOUNT
10715+ "jno 0f\n"
10716+ LOCK_PREFIX "decl %0\n"
10717+ "int $4\n0:\n"
10718+ _ASM_EXTABLE(0b, 0b)
10719+#endif
10720+
10721+ :"+m" (rw->lock) : : "memory");
10722 }
10723
10724 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10725 {
10726- asm volatile(LOCK_PREFIX "addl %1, %0"
10727+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10728+
10729+#ifdef CONFIG_PAX_REFCOUNT
10730+ "jno 0f\n"
10731+ LOCK_PREFIX "subl %1, %0\n"
10732+ "int $4\n0:\n"
10733+ _ASM_EXTABLE(0b, 0b)
10734+#endif
10735+
10736 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10737 }
10738
10739diff -urNp linux-2.6.32.48/arch/x86/include/asm/stackprotector.h linux-2.6.32.48/arch/x86/include/asm/stackprotector.h
10740--- linux-2.6.32.48/arch/x86/include/asm/stackprotector.h 2011-11-08 19:02:43.000000000 -0500
10741+++ linux-2.6.32.48/arch/x86/include/asm/stackprotector.h 2011-11-15 19:59:42.000000000 -0500
10742@@ -48,7 +48,7 @@
10743 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10744 */
10745 #define GDT_STACK_CANARY_INIT \
10746- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10747+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10748
10749 /*
10750 * Initialize the stackprotector canary value.
10751@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10752
10753 static inline void load_stack_canary_segment(void)
10754 {
10755-#ifdef CONFIG_X86_32
10756+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10757 asm volatile ("mov %0, %%gs" : : "r" (0));
10758 #endif
10759 }
10760diff -urNp linux-2.6.32.48/arch/x86/include/asm/system.h linux-2.6.32.48/arch/x86/include/asm/system.h
10761--- linux-2.6.32.48/arch/x86/include/asm/system.h 2011-11-08 19:02:43.000000000 -0500
10762+++ linux-2.6.32.48/arch/x86/include/asm/system.h 2011-11-15 19:59:42.000000000 -0500
10763@@ -132,7 +132,7 @@ do { \
10764 "thread_return:\n\t" \
10765 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10766 __switch_canary \
10767- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10768+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10769 "movq %%rax,%%rdi\n\t" \
10770 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10771 "jnz ret_from_fork\n\t" \
10772@@ -143,7 +143,7 @@ do { \
10773 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10774 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10775 [_tif_fork] "i" (_TIF_FORK), \
10776- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10777+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10778 [current_task] "m" (per_cpu_var(current_task)) \
10779 __switch_canary_iparam \
10780 : "memory", "cc" __EXTRA_CLOBBER)
10781@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10782 {
10783 unsigned long __limit;
10784 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10785- return __limit + 1;
10786+ return __limit;
10787 }
10788
10789 static inline void native_clts(void)
10790@@ -340,12 +340,12 @@ void enable_hlt(void);
10791
10792 void cpu_idle_wait(void);
10793
10794-extern unsigned long arch_align_stack(unsigned long sp);
10795+#define arch_align_stack(x) ((x) & ~0xfUL)
10796 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10797
10798 void default_idle(void);
10799
10800-void stop_this_cpu(void *dummy);
10801+void stop_this_cpu(void *dummy) __noreturn;
10802
10803 /*
10804 * Force strict CPU ordering.
10805diff -urNp linux-2.6.32.48/arch/x86/include/asm/thread_info.h linux-2.6.32.48/arch/x86/include/asm/thread_info.h
10806--- linux-2.6.32.48/arch/x86/include/asm/thread_info.h 2011-11-08 19:02:43.000000000 -0500
10807+++ linux-2.6.32.48/arch/x86/include/asm/thread_info.h 2011-11-15 19:59:42.000000000 -0500
10808@@ -10,6 +10,7 @@
10809 #include <linux/compiler.h>
10810 #include <asm/page.h>
10811 #include <asm/types.h>
10812+#include <asm/percpu.h>
10813
10814 /*
10815 * low level task data that entry.S needs immediate access to
10816@@ -24,7 +25,6 @@ struct exec_domain;
10817 #include <asm/atomic.h>
10818
10819 struct thread_info {
10820- struct task_struct *task; /* main task structure */
10821 struct exec_domain *exec_domain; /* execution domain */
10822 __u32 flags; /* low level flags */
10823 __u32 status; /* thread synchronous flags */
10824@@ -34,18 +34,12 @@ struct thread_info {
10825 mm_segment_t addr_limit;
10826 struct restart_block restart_block;
10827 void __user *sysenter_return;
10828-#ifdef CONFIG_X86_32
10829- unsigned long previous_esp; /* ESP of the previous stack in
10830- case of nested (IRQ) stacks
10831- */
10832- __u8 supervisor_stack[0];
10833-#endif
10834+ unsigned long lowest_stack;
10835 int uaccess_err;
10836 };
10837
10838-#define INIT_THREAD_INFO(tsk) \
10839+#define INIT_THREAD_INFO \
10840 { \
10841- .task = &tsk, \
10842 .exec_domain = &default_exec_domain, \
10843 .flags = 0, \
10844 .cpu = 0, \
10845@@ -56,7 +50,7 @@ struct thread_info {
10846 }, \
10847 }
10848
10849-#define init_thread_info (init_thread_union.thread_info)
10850+#define init_thread_info (init_thread_union.stack)
10851 #define init_stack (init_thread_union.stack)
10852
10853 #else /* !__ASSEMBLY__ */
10854@@ -163,6 +157,23 @@ struct thread_info {
10855 #define alloc_thread_info(tsk) \
10856 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10857
10858+#ifdef __ASSEMBLY__
10859+/* how to get the thread information struct from ASM */
10860+#define GET_THREAD_INFO(reg) \
10861+ mov PER_CPU_VAR(current_tinfo), reg
10862+
10863+/* use this one if reg already contains %esp */
10864+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10865+#else
10866+/* how to get the thread information struct from C */
10867+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10868+
10869+static __always_inline struct thread_info *current_thread_info(void)
10870+{
10871+ return percpu_read_stable(current_tinfo);
10872+}
10873+#endif
10874+
10875 #ifdef CONFIG_X86_32
10876
10877 #define STACK_WARN (THREAD_SIZE/8)
10878@@ -173,35 +184,13 @@ struct thread_info {
10879 */
10880 #ifndef __ASSEMBLY__
10881
10882-
10883 /* how to get the current stack pointer from C */
10884 register unsigned long current_stack_pointer asm("esp") __used;
10885
10886-/* how to get the thread information struct from C */
10887-static inline struct thread_info *current_thread_info(void)
10888-{
10889- return (struct thread_info *)
10890- (current_stack_pointer & ~(THREAD_SIZE - 1));
10891-}
10892-
10893-#else /* !__ASSEMBLY__ */
10894-
10895-/* how to get the thread information struct from ASM */
10896-#define GET_THREAD_INFO(reg) \
10897- movl $-THREAD_SIZE, reg; \
10898- andl %esp, reg
10899-
10900-/* use this one if reg already contains %esp */
10901-#define GET_THREAD_INFO_WITH_ESP(reg) \
10902- andl $-THREAD_SIZE, reg
10903-
10904 #endif
10905
10906 #else /* X86_32 */
10907
10908-#include <asm/percpu.h>
10909-#define KERNEL_STACK_OFFSET (5*8)
10910-
10911 /*
10912 * macros/functions for gaining access to the thread information structure
10913 * preempt_count needs to be 1 initially, until the scheduler is functional.
10914@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10915 #ifndef __ASSEMBLY__
10916 DECLARE_PER_CPU(unsigned long, kernel_stack);
10917
10918-static inline struct thread_info *current_thread_info(void)
10919-{
10920- struct thread_info *ti;
10921- ti = (void *)(percpu_read_stable(kernel_stack) +
10922- KERNEL_STACK_OFFSET - THREAD_SIZE);
10923- return ti;
10924-}
10925-
10926-#else /* !__ASSEMBLY__ */
10927-
10928-/* how to get the thread information struct from ASM */
10929-#define GET_THREAD_INFO(reg) \
10930- movq PER_CPU_VAR(kernel_stack),reg ; \
10931- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10932-
10933+/* how to get the current stack pointer from C */
10934+register unsigned long current_stack_pointer asm("rsp") __used;
10935 #endif
10936
10937 #endif /* !X86_32 */
10938@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10939 extern void free_thread_info(struct thread_info *ti);
10940 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10941 #define arch_task_cache_init arch_task_cache_init
10942+
10943+#define __HAVE_THREAD_FUNCTIONS
10944+#define task_thread_info(task) (&(task)->tinfo)
10945+#define task_stack_page(task) ((task)->stack)
10946+#define setup_thread_stack(p, org) do {} while (0)
10947+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10948+
10949+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10950+extern struct task_struct *alloc_task_struct(void);
10951+extern void free_task_struct(struct task_struct *);
10952+
10953 #endif
10954 #endif /* _ASM_X86_THREAD_INFO_H */
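[note] The thread_info rework above stops deriving the structure from the stack pointer: thread_info moves out of the kernel stack (task_thread_info() now points into task_struct) and current_thread_info() becomes a per-CPU pointer read, so nothing may assume the structure sits at the stack base. The contrast in sketch form; the masked lookup is shown only to illustrate the stock behaviour being replaced:

/* stock 32-bit lookup (removed above): mask the stack pointer down to the
 * THREAD_SIZE-aligned base, where thread_info used to live:
 *   ti = (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
 */

static __always_inline struct thread_info *current_thread_info_sketch(void)
{
	return percpu_read_stable(current_tinfo);	/* per-CPU pointer, no stack-layout assumption */
}
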
10955diff -urNp linux-2.6.32.48/arch/x86/include/asm/uaccess_32.h linux-2.6.32.48/arch/x86/include/asm/uaccess_32.h
10956--- linux-2.6.32.48/arch/x86/include/asm/uaccess_32.h 2011-11-08 19:02:43.000000000 -0500
10957+++ linux-2.6.32.48/arch/x86/include/asm/uaccess_32.h 2011-11-15 19:59:42.000000000 -0500
10958@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10959 static __always_inline unsigned long __must_check
10960 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10961 {
10962+ pax_track_stack();
10963+
10964+ if ((long)n < 0)
10965+ return n;
10966+
10967 if (__builtin_constant_p(n)) {
10968 unsigned long ret;
10969
10970@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10971 return ret;
10972 }
10973 }
10974+ if (!__builtin_constant_p(n))
10975+ check_object_size(from, n, true);
10976 return __copy_to_user_ll(to, from, n);
10977 }
10978
10979@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10980 __copy_to_user(void __user *to, const void *from, unsigned long n)
10981 {
10982 might_fault();
10983+
10984 return __copy_to_user_inatomic(to, from, n);
10985 }
10986
10987 static __always_inline unsigned long
10988 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10989 {
10990+ if ((long)n < 0)
10991+ return n;
10992+
10993 /* Avoid zeroing the tail if the copy fails..
10994 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10995 * but as the zeroing behaviour is only significant when n is not
10996@@ -138,6 +149,12 @@ static __always_inline unsigned long
10997 __copy_from_user(void *to, const void __user *from, unsigned long n)
10998 {
10999 might_fault();
11000+
11001+ pax_track_stack();
11002+
11003+ if ((long)n < 0)
11004+ return n;
11005+
11006 if (__builtin_constant_p(n)) {
11007 unsigned long ret;
11008
11009@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
11010 return ret;
11011 }
11012 }
11013+ if (!__builtin_constant_p(n))
11014+ check_object_size(to, n, false);
11015 return __copy_from_user_ll(to, from, n);
11016 }
11017
11018@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
11019 const void __user *from, unsigned long n)
11020 {
11021 might_fault();
11022+
11023+ if ((long)n < 0)
11024+ return n;
11025+
11026 if (__builtin_constant_p(n)) {
11027 unsigned long ret;
11028
11029@@ -182,14 +205,62 @@ static __always_inline unsigned long
11030 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11031 unsigned long n)
11032 {
11033- return __copy_from_user_ll_nocache_nozero(to, from, n);
11034+ if ((long)n < 0)
11035+ return n;
11036+
11037+ return __copy_from_user_ll_nocache_nozero(to, from, n);
11038+}
11039+
11040+/**
11041+ * copy_to_user: - Copy a block of data into user space.
11042+ * @to: Destination address, in user space.
11043+ * @from: Source address, in kernel space.
11044+ * @n: Number of bytes to copy.
11045+ *
11046+ * Context: User context only. This function may sleep.
11047+ *
11048+ * Copy data from kernel space to user space.
11049+ *
11050+ * Returns number of bytes that could not be copied.
11051+ * On success, this will be zero.
11052+ */
11053+static __always_inline unsigned long __must_check
11054+copy_to_user(void __user *to, const void *from, unsigned long n)
11055+{
11056+ if (access_ok(VERIFY_WRITE, to, n))
11057+ n = __copy_to_user(to, from, n);
11058+ return n;
11059+}
11060+
11061+/**
11062+ * copy_from_user: - Copy a block of data from user space.
11063+ * @to: Destination address, in kernel space.
11064+ * @from: Source address, in user space.
11065+ * @n: Number of bytes to copy.
11066+ *
11067+ * Context: User context only. This function may sleep.
11068+ *
11069+ * Copy data from user space to kernel space.
11070+ *
11071+ * Returns number of bytes that could not be copied.
11072+ * On success, this will be zero.
11073+ *
11074+ * If some data could not be copied, this function will pad the copied
11075+ * data to the requested size using zero bytes.
11076+ */
11077+static __always_inline unsigned long __must_check
11078+copy_from_user(void *to, const void __user *from, unsigned long n)
11079+{
11080+ if (access_ok(VERIFY_READ, from, n))
11081+ n = __copy_from_user(to, from, n);
11082+ else if ((long)n > 0) {
11083+ if (!__builtin_constant_p(n))
11084+ check_object_size(to, n, false);
11085+ memset(to, 0, n);
11086+ }
11087+ return n;
11088 }
11089
11090-unsigned long __must_check copy_to_user(void __user *to,
11091- const void *from, unsigned long n);
11092-unsigned long __must_check copy_from_user(void *to,
11093- const void __user *from,
11094- unsigned long n);
11095 long __must_check strncpy_from_user(char *dst, const char __user *src,
11096 long count);
11097 long __must_check __strncpy_from_user(char *dst,
11098diff -urNp linux-2.6.32.48/arch/x86/include/asm/uaccess_64.h linux-2.6.32.48/arch/x86/include/asm/uaccess_64.h
11099--- linux-2.6.32.48/arch/x86/include/asm/uaccess_64.h 2011-11-08 19:02:43.000000000 -0500
11100+++ linux-2.6.32.48/arch/x86/include/asm/uaccess_64.h 2011-11-15 19:59:42.000000000 -0500
11101@@ -9,6 +9,9 @@
11102 #include <linux/prefetch.h>
11103 #include <linux/lockdep.h>
11104 #include <asm/page.h>
11105+#include <asm/pgtable.h>
11106+
11107+#define set_fs(x) (current_thread_info()->addr_limit = (x))
11108
11109 /*
11110 * Copy To/From Userspace
11111@@ -19,113 +22,203 @@ __must_check unsigned long
11112 copy_user_generic(void *to, const void *from, unsigned len);
11113
11114 __must_check unsigned long
11115-copy_to_user(void __user *to, const void *from, unsigned len);
11116-__must_check unsigned long
11117-copy_from_user(void *to, const void __user *from, unsigned len);
11118-__must_check unsigned long
11119 copy_in_user(void __user *to, const void __user *from, unsigned len);
11120
11121 static __always_inline __must_check
11122-int __copy_from_user(void *dst, const void __user *src, unsigned size)
11123+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
11124 {
11125- int ret = 0;
11126+ unsigned ret = 0;
11127
11128 might_fault();
11129- if (!__builtin_constant_p(size))
11130- return copy_user_generic(dst, (__force void *)src, size);
11131+
11132+ if ((int)size < 0)
11133+ return size;
11134+
11135+#ifdef CONFIG_PAX_MEMORY_UDEREF
11136+ if (!__access_ok(VERIFY_READ, src, size))
11137+ return size;
11138+#endif
11139+
11140+ if (!__builtin_constant_p(size)) {
11141+ check_object_size(dst, size, false);
11142+
11143+#ifdef CONFIG_PAX_MEMORY_UDEREF
11144+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11145+ src += PAX_USER_SHADOW_BASE;
11146+#endif
11147+
11148+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11149+ }
11150 switch (size) {
11151- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11152+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11153 ret, "b", "b", "=q", 1);
11154 return ret;
11155- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11156+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11157 ret, "w", "w", "=r", 2);
11158 return ret;
11159- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11160+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11161 ret, "l", "k", "=r", 4);
11162 return ret;
11163- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11164+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11165 ret, "q", "", "=r", 8);
11166 return ret;
11167 case 10:
11168- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11169+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11170 ret, "q", "", "=r", 10);
11171 if (unlikely(ret))
11172 return ret;
11173 __get_user_asm(*(u16 *)(8 + (char *)dst),
11174- (u16 __user *)(8 + (char __user *)src),
11175+ (const u16 __user *)(8 + (const char __user *)src),
11176 ret, "w", "w", "=r", 2);
11177 return ret;
11178 case 16:
11179- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11180+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11181 ret, "q", "", "=r", 16);
11182 if (unlikely(ret))
11183 return ret;
11184 __get_user_asm(*(u64 *)(8 + (char *)dst),
11185- (u64 __user *)(8 + (char __user *)src),
11186+ (const u64 __user *)(8 + (const char __user *)src),
11187 ret, "q", "", "=r", 8);
11188 return ret;
11189 default:
11190- return copy_user_generic(dst, (__force void *)src, size);
11191+
11192+#ifdef CONFIG_PAX_MEMORY_UDEREF
11193+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11194+ src += PAX_USER_SHADOW_BASE;
11195+#endif
11196+
11197+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11198 }
11199 }
11200
11201 static __always_inline __must_check
11202-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11203+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
11204 {
11205- int ret = 0;
11206+ unsigned ret = 0;
11207
11208 might_fault();
11209- if (!__builtin_constant_p(size))
11210- return copy_user_generic((__force void *)dst, src, size);
11211+
11212+ pax_track_stack();
11213+
11214+ if ((int)size < 0)
11215+ return size;
11216+
11217+#ifdef CONFIG_PAX_MEMORY_UDEREF
11218+ if (!__access_ok(VERIFY_WRITE, dst, size))
11219+ return size;
11220+#endif
11221+
11222+ if (!__builtin_constant_p(size)) {
11223+ check_object_size(src, size, true);
11224+
11225+#ifdef CONFIG_PAX_MEMORY_UDEREF
11226+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11227+ dst += PAX_USER_SHADOW_BASE;
11228+#endif
11229+
11230+ return copy_user_generic((__force_kernel void *)dst, src, size);
11231+ }
11232 switch (size) {
11233- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11234+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11235 ret, "b", "b", "iq", 1);
11236 return ret;
11237- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11238+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11239 ret, "w", "w", "ir", 2);
11240 return ret;
11241- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11242+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11243 ret, "l", "k", "ir", 4);
11244 return ret;
11245- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11246+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11247 ret, "q", "", "er", 8);
11248 return ret;
11249 case 10:
11250- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11251+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11252 ret, "q", "", "er", 10);
11253 if (unlikely(ret))
11254 return ret;
11255 asm("":::"memory");
11256- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11257+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11258 ret, "w", "w", "ir", 2);
11259 return ret;
11260 case 16:
11261- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11262+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11263 ret, "q", "", "er", 16);
11264 if (unlikely(ret))
11265 return ret;
11266 asm("":::"memory");
11267- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11268+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11269 ret, "q", "", "er", 8);
11270 return ret;
11271 default:
11272- return copy_user_generic((__force void *)dst, src, size);
11273+
11274+#ifdef CONFIG_PAX_MEMORY_UDEREF
11275+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11276+ dst += PAX_USER_SHADOW_BASE;
11277+#endif
11278+
11279+ return copy_user_generic((__force_kernel void *)dst, src, size);
11280+ }
11281+}
11282+
11283+static __always_inline __must_check
11284+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11285+{
11286+ if (access_ok(VERIFY_WRITE, to, len))
11287+ len = __copy_to_user(to, from, len);
11288+ return len;
11289+}
11290+
11291+static __always_inline __must_check
11292+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11293+{
11294+ if ((int)len < 0)
11295+ return len;
11296+
11297+ if (access_ok(VERIFY_READ, from, len))
11298+ len = __copy_from_user(to, from, len);
11299+ else if ((int)len > 0) {
11300+ if (!__builtin_constant_p(len))
11301+ check_object_size(to, len, false);
11302+ memset(to, 0, len);
11303 }
11304+ return len;
11305 }
11306
11307 static __always_inline __must_check
11308-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11309+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11310 {
11311- int ret = 0;
11312+ unsigned ret = 0;
11313
11314 might_fault();
11315- if (!__builtin_constant_p(size))
11316- return copy_user_generic((__force void *)dst,
11317- (__force void *)src, size);
11318+
11319+ pax_track_stack();
11320+
11321+ if ((int)size < 0)
11322+ return size;
11323+
11324+#ifdef CONFIG_PAX_MEMORY_UDEREF
11325+ if (!__access_ok(VERIFY_READ, src, size))
11326+ return size;
11327+ if (!__access_ok(VERIFY_WRITE, dst, size))
11328+ return size;
11329+#endif
11330+
11331+ if (!__builtin_constant_p(size)) {
11332+
11333+#ifdef CONFIG_PAX_MEMORY_UDEREF
11334+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11335+ src += PAX_USER_SHADOW_BASE;
11336+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11337+ dst += PAX_USER_SHADOW_BASE;
11338+#endif
11339+
11340+ return copy_user_generic((__force_kernel void *)dst,
11341+ (__force_kernel const void *)src, size);
11342+ }
11343 switch (size) {
11344 case 1: {
11345 u8 tmp;
11346- __get_user_asm(tmp, (u8 __user *)src,
11347+ __get_user_asm(tmp, (const u8 __user *)src,
11348 ret, "b", "b", "=q", 1);
11349 if (likely(!ret))
11350 __put_user_asm(tmp, (u8 __user *)dst,
11351@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11352 }
11353 case 2: {
11354 u16 tmp;
11355- __get_user_asm(tmp, (u16 __user *)src,
11356+ __get_user_asm(tmp, (const u16 __user *)src,
11357 ret, "w", "w", "=r", 2);
11358 if (likely(!ret))
11359 __put_user_asm(tmp, (u16 __user *)dst,
11360@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11361
11362 case 4: {
11363 u32 tmp;
11364- __get_user_asm(tmp, (u32 __user *)src,
11365+ __get_user_asm(tmp, (const u32 __user *)src,
11366 ret, "l", "k", "=r", 4);
11367 if (likely(!ret))
11368 __put_user_asm(tmp, (u32 __user *)dst,
11369@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11370 }
11371 case 8: {
11372 u64 tmp;
11373- __get_user_asm(tmp, (u64 __user *)src,
11374+ __get_user_asm(tmp, (const u64 __user *)src,
11375 ret, "q", "", "=r", 8);
11376 if (likely(!ret))
11377 __put_user_asm(tmp, (u64 __user *)dst,
11378@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11379 return ret;
11380 }
11381 default:
11382- return copy_user_generic((__force void *)dst,
11383- (__force void *)src, size);
11384+
11385+#ifdef CONFIG_PAX_MEMORY_UDEREF
11386+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11387+ src += PAX_USER_SHADOW_BASE;
11388+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11389+ dst += PAX_USER_SHADOW_BASE;
11390+#endif
11391+
11392+ return copy_user_generic((__force_kernel void *)dst,
11393+ (__force_kernel const void *)src, size);
11394 }
11395 }
11396
11397@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11398 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11399 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11400
11401-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11402- unsigned size);
11403+static __must_check __always_inline unsigned long
11404+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11405+{
11406+ pax_track_stack();
11407+
11408+ if ((int)size < 0)
11409+ return size;
11410+
11411+#ifdef CONFIG_PAX_MEMORY_UDEREF
11412+ if (!__access_ok(VERIFY_READ, src, size))
11413+ return size;
11414+
11415+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11416+ src += PAX_USER_SHADOW_BASE;
11417+#endif
11418
11419-static __must_check __always_inline int
11420+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11421+}
11422+
11423+static __must_check __always_inline unsigned long
11424 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11425 {
11426- return copy_user_generic((__force void *)dst, src, size);
11427+ if ((int)size < 0)
11428+ return size;
11429+
11430+#ifdef CONFIG_PAX_MEMORY_UDEREF
11431+ if (!__access_ok(VERIFY_WRITE, dst, size))
11432+ return size;
11433+
11434+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11435+ dst += PAX_USER_SHADOW_BASE;
11436+#endif
11437+
11438+ return copy_user_generic((__force_kernel void *)dst, src, size);
11439 }
11440
11441-extern long __copy_user_nocache(void *dst, const void __user *src,
11442+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11443 unsigned size, int zerorest);
11444
11445-static inline int
11446-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11447+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11448 {
11449 might_sleep();
11450+
11451+ if ((int)size < 0)
11452+ return size;
11453+
11454+#ifdef CONFIG_PAX_MEMORY_UDEREF
11455+ if (!__access_ok(VERIFY_READ, src, size))
11456+ return size;
11457+#endif
11458+
11459 return __copy_user_nocache(dst, src, size, 1);
11460 }
11461
11462-static inline int
11463-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11464+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11465 unsigned size)
11466 {
11467+ if ((int)size < 0)
11468+ return size;
11469+
11470+#ifdef CONFIG_PAX_MEMORY_UDEREF
11471+ if (!__access_ok(VERIFY_READ, src, size))
11472+ return size;
11473+#endif
11474+
11475 return __copy_user_nocache(dst, src, size, 0);
11476 }
11477
11478-unsigned long
11479-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11480+extern unsigned long
11481+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
11482
11483 #endif /* _ASM_X86_UACCESS_64_H */
11484diff -urNp linux-2.6.32.48/arch/x86/include/asm/uaccess.h linux-2.6.32.48/arch/x86/include/asm/uaccess.h
11485--- linux-2.6.32.48/arch/x86/include/asm/uaccess.h 2011-11-08 19:02:43.000000000 -0500
11486+++ linux-2.6.32.48/arch/x86/include/asm/uaccess.h 2011-11-15 19:59:42.000000000 -0500
11487@@ -8,12 +8,15 @@
11488 #include <linux/thread_info.h>
11489 #include <linux/prefetch.h>
11490 #include <linux/string.h>
11491+#include <linux/sched.h>
11492 #include <asm/asm.h>
11493 #include <asm/page.h>
11494
11495 #define VERIFY_READ 0
11496 #define VERIFY_WRITE 1
11497
11498+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11499+
11500 /*
11501 * The fs value determines whether argument validity checking should be
11502 * performed or not. If get_fs() == USER_DS, checking is performed, with
11503@@ -29,7 +32,12 @@
11504
11505 #define get_ds() (KERNEL_DS)
11506 #define get_fs() (current_thread_info()->addr_limit)
11507+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11508+void __set_fs(mm_segment_t x);
11509+void set_fs(mm_segment_t x);
11510+#else
11511 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11512+#endif
11513
11514 #define segment_eq(a, b) ((a).seg == (b).seg)
11515
11516@@ -77,7 +85,33 @@
11517 * checks that the pointer is in the user space range - after calling
11518 * this function, memory access functions may still return -EFAULT.
11519 */
11520-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11521+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11522+#define access_ok(type, addr, size) \
11523+({ \
11524+ long __size = size; \
11525+ unsigned long __addr = (unsigned long)addr; \
11526+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11527+ unsigned long __end_ao = __addr + __size - 1; \
11528+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11529+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11530+	while (__addr_ao <= __end_ao) {			\
11531+ char __c_ao; \
11532+ __addr_ao += PAGE_SIZE; \
11533+ if (__size > PAGE_SIZE) \
11534+ cond_resched(); \
11535+ if (__get_user(__c_ao, (char __user *)__addr)) \
11536+ break; \
11537+ if (type != VERIFY_WRITE) { \
11538+ __addr = __addr_ao; \
11539+ continue; \
11540+ } \
11541+ if (__put_user(__c_ao, (char __user *)__addr)) \
11542+ break; \
11543+ __addr = __addr_ao; \
11544+ } \
11545+ } \
11546+ __ret_ao; \
11547+})
11548
11549 /*
11550 * The exception table consists of pairs of addresses: the first is the
11551@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11552 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11553 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11554
11555-
11556+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11557+#define __copyuser_seg "gs;"
11558+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11559+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11560+#else
11561+#define __copyuser_seg
11562+#define __COPYUSER_SET_ES
11563+#define __COPYUSER_RESTORE_ES
11564+#endif
11565
11566 #ifdef CONFIG_X86_32
11567 #define __put_user_asm_u64(x, addr, err, errret) \
11568- asm volatile("1: movl %%eax,0(%2)\n" \
11569- "2: movl %%edx,4(%2)\n" \
11570+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11571+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11572 "3:\n" \
11573 ".section .fixup,\"ax\"\n" \
11574 "4: movl %3,%0\n" \
11575@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11576 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11577
11578 #define __put_user_asm_ex_u64(x, addr) \
11579- asm volatile("1: movl %%eax,0(%1)\n" \
11580- "2: movl %%edx,4(%1)\n" \
11581+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11582+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11583 "3:\n" \
11584 _ASM_EXTABLE(1b, 2b - 1b) \
11585 _ASM_EXTABLE(2b, 3b - 2b) \
11586@@ -253,7 +295,7 @@ extern void __put_user_8(void);
11587 __typeof__(*(ptr)) __pu_val; \
11588 __chk_user_ptr(ptr); \
11589 might_fault(); \
11590- __pu_val = x; \
11591+ __pu_val = (x); \
11592 switch (sizeof(*(ptr))) { \
11593 case 1: \
11594 __put_user_x(1, __pu_val, ptr, __ret_pu); \
11595@@ -374,7 +416,7 @@ do { \
11596 } while (0)
11597
11598 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11599- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11600+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11601 "2:\n" \
11602 ".section .fixup,\"ax\"\n" \
11603 "3: mov %3,%0\n" \
11604@@ -382,7 +424,7 @@ do { \
11605 " jmp 2b\n" \
11606 ".previous\n" \
11607 _ASM_EXTABLE(1b, 3b) \
11608- : "=r" (err), ltype(x) \
11609+ : "=r" (err), ltype (x) \
11610 : "m" (__m(addr)), "i" (errret), "0" (err))
11611
11612 #define __get_user_size_ex(x, ptr, size) \
11613@@ -407,7 +449,7 @@ do { \
11614 } while (0)
11615
11616 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11617- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11618+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11619 "2:\n" \
11620 _ASM_EXTABLE(1b, 2b - 1b) \
11621 : ltype(x) : "m" (__m(addr)))
11622@@ -424,13 +466,24 @@ do { \
11623 int __gu_err; \
11624 unsigned long __gu_val; \
11625 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11626- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11627+ (x) = (__typeof__(*(ptr)))__gu_val; \
11628 __gu_err; \
11629 })
11630
11631 /* FIXME: this hack is definitely wrong -AK */
11632 struct __large_struct { unsigned long buf[100]; };
11633-#define __m(x) (*(struct __large_struct __user *)(x))
11634+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11635+#define ____m(x) \
11636+({ \
11637+ unsigned long ____x = (unsigned long)(x); \
11638+ if (____x < PAX_USER_SHADOW_BASE) \
11639+ ____x += PAX_USER_SHADOW_BASE; \
11640+ (void __user *)____x; \
11641+})
11642+#else
11643+#define ____m(x) (x)
11644+#endif
11645+#define __m(x) (*(struct __large_struct __user *)____m(x))
11646
11647 /*
11648 * Tell gcc we read from memory instead of writing: this is because
11649@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11650 * aliasing issues.
11651 */
11652 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11653- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11654+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11655 "2:\n" \
11656 ".section .fixup,\"ax\"\n" \
11657 "3: mov %3,%0\n" \
11658@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11659 ".previous\n" \
11660 _ASM_EXTABLE(1b, 3b) \
11661 : "=r"(err) \
11662- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11663+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11664
11665 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11666- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11667+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11668 "2:\n" \
11669 _ASM_EXTABLE(1b, 2b - 1b) \
11670 : : ltype(x), "m" (__m(addr)))
11671@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11672 * On error, the variable @x is set to zero.
11673 */
11674
11675+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11676+#define __get_user(x, ptr) get_user((x), (ptr))
11677+#else
11678 #define __get_user(x, ptr) \
11679 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11680+#endif
11681
11682 /**
11683 * __put_user: - Write a simple value into user space, with less checking.
11684@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11685 * Returns zero on success, or -EFAULT on error.
11686 */
11687
11688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11689+#define __put_user(x, ptr) put_user((x), (ptr))
11690+#else
11691 #define __put_user(x, ptr) \
11692 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11693+#endif
11694
11695 #define __get_user_unaligned __get_user
11696 #define __put_user_unaligned __put_user
11697@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11698 #define get_user_ex(x, ptr) do { \
11699 unsigned long __gue_val; \
11700 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11701- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11702+ (x) = (__typeof__(*(ptr)))__gue_val; \
11703 } while (0)
11704
11705 #ifdef CONFIG_X86_WP_WORKS_OK
11706@@ -567,6 +628,7 @@ extern struct movsl_mask {
11707
11708 #define ARCH_HAS_NOCACHE_UACCESS 1
11709
11710+#define ARCH_HAS_SORT_EXTABLE
11711 #ifdef CONFIG_X86_32
11712 # include "uaccess_32.h"
11713 #else
11714diff -urNp linux-2.6.32.48/arch/x86/include/asm/vdso.h linux-2.6.32.48/arch/x86/include/asm/vdso.h
11715--- linux-2.6.32.48/arch/x86/include/asm/vdso.h 2011-11-08 19:02:43.000000000 -0500
11716+++ linux-2.6.32.48/arch/x86/include/asm/vdso.h 2011-11-15 19:59:42.000000000 -0500
11717@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
11718 #define VDSO32_SYMBOL(base, name) \
11719 ({ \
11720 extern const char VDSO32_##name[]; \
11721- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11722+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11723 })
11724 #endif
11725
11726diff -urNp linux-2.6.32.48/arch/x86/include/asm/vgtod.h linux-2.6.32.48/arch/x86/include/asm/vgtod.h
11727--- linux-2.6.32.48/arch/x86/include/asm/vgtod.h 2011-11-08 19:02:43.000000000 -0500
11728+++ linux-2.6.32.48/arch/x86/include/asm/vgtod.h 2011-11-15 19:59:42.000000000 -0500
11729@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11730 int sysctl_enabled;
11731 struct timezone sys_tz;
11732 struct { /* extract of a clocksource struct */
11733+ char name[8];
11734 cycle_t (*vread)(void);
11735 cycle_t cycle_last;
11736 cycle_t mask;
11737diff -urNp linux-2.6.32.48/arch/x86/include/asm/vmi.h linux-2.6.32.48/arch/x86/include/asm/vmi.h
11738--- linux-2.6.32.48/arch/x86/include/asm/vmi.h 2011-11-08 19:02:43.000000000 -0500
11739+++ linux-2.6.32.48/arch/x86/include/asm/vmi.h 2011-11-15 19:59:42.000000000 -0500
11740@@ -191,6 +191,7 @@ struct vrom_header {
11741 u8 reserved[96]; /* Reserved for headers */
11742 char vmi_init[8]; /* VMI_Init jump point */
11743 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11744+ char rom_data[8048]; /* rest of the option ROM */
11745 } __attribute__((packed));
11746
11747 struct pnp_header {
11748diff -urNp linux-2.6.32.48/arch/x86/include/asm/vmi_time.h linux-2.6.32.48/arch/x86/include/asm/vmi_time.h
11749--- linux-2.6.32.48/arch/x86/include/asm/vmi_time.h 2011-11-08 19:02:43.000000000 -0500
11750+++ linux-2.6.32.48/arch/x86/include/asm/vmi_time.h 2011-11-15 19:59:42.000000000 -0500
11751@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11752 int (*wallclock_updated)(void);
11753 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11754 void (*cancel_alarm)(u32 flags);
11755-} vmi_timer_ops;
11756+} __no_const vmi_timer_ops;
11757
11758 /* Prototypes */
11759 extern void __init vmi_time_init(void);
11760diff -urNp linux-2.6.32.48/arch/x86/include/asm/vsyscall.h linux-2.6.32.48/arch/x86/include/asm/vsyscall.h
11761--- linux-2.6.32.48/arch/x86/include/asm/vsyscall.h 2011-11-08 19:02:43.000000000 -0500
11762+++ linux-2.6.32.48/arch/x86/include/asm/vsyscall.h 2011-11-15 19:59:42.000000000 -0500
11763@@ -15,9 +15,10 @@ enum vsyscall_num {
11764
11765 #ifdef __KERNEL__
11766 #include <linux/seqlock.h>
11767+#include <linux/getcpu.h>
11768+#include <linux/time.h>
11769
11770 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11771-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11772
11773 /* Definitions for CONFIG_GENERIC_TIME definitions */
11774 #define __section_vsyscall_gtod_data __attribute__ \
11775@@ -31,7 +32,6 @@ enum vsyscall_num {
11776 #define VGETCPU_LSL 2
11777
11778 extern int __vgetcpu_mode;
11779-extern volatile unsigned long __jiffies;
11780
11781 /* kernel space (writeable) */
11782 extern int vgetcpu_mode;
11783@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11784
11785 extern void map_vsyscall(void);
11786
11787+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11788+extern time_t vtime(time_t *t);
11789+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11790 #endif /* __KERNEL__ */
11791
11792 #endif /* _ASM_X86_VSYSCALL_H */
11793diff -urNp linux-2.6.32.48/arch/x86/include/asm/x86_init.h linux-2.6.32.48/arch/x86/include/asm/x86_init.h
11794--- linux-2.6.32.48/arch/x86/include/asm/x86_init.h 2011-11-08 19:02:43.000000000 -0500
11795+++ linux-2.6.32.48/arch/x86/include/asm/x86_init.h 2011-11-15 19:59:42.000000000 -0500
11796@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11797 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11798 void (*find_smp_config)(unsigned int reserve);
11799 void (*get_smp_config)(unsigned int early);
11800-};
11801+} __no_const;
11802
11803 /**
11804 * struct x86_init_resources - platform specific resource related ops
11805@@ -42,7 +42,7 @@ struct x86_init_resources {
11806 void (*probe_roms)(void);
11807 void (*reserve_resources)(void);
11808 char *(*memory_setup)(void);
11809-};
11810+} __no_const;
11811
11812 /**
11813 * struct x86_init_irqs - platform specific interrupt setup
11814@@ -55,7 +55,7 @@ struct x86_init_irqs {
11815 void (*pre_vector_init)(void);
11816 void (*intr_init)(void);
11817 void (*trap_init)(void);
11818-};
11819+} __no_const;
11820
11821 /**
11822 * struct x86_init_oem - oem platform specific customizing functions
11823@@ -65,7 +65,7 @@ struct x86_init_irqs {
11824 struct x86_init_oem {
11825 void (*arch_setup)(void);
11826 void (*banner)(void);
11827-};
11828+} __no_const;
11829
11830 /**
11831 * struct x86_init_paging - platform specific paging functions
11832@@ -75,7 +75,7 @@ struct x86_init_oem {
11833 struct x86_init_paging {
11834 void (*pagetable_setup_start)(pgd_t *base);
11835 void (*pagetable_setup_done)(pgd_t *base);
11836-};
11837+} __no_const;
11838
11839 /**
11840 * struct x86_init_timers - platform specific timer setup
11841@@ -88,7 +88,7 @@ struct x86_init_timers {
11842 void (*setup_percpu_clockev)(void);
11843 void (*tsc_pre_init)(void);
11844 void (*timer_init)(void);
11845-};
11846+} __no_const;
11847
11848 /**
11849 * struct x86_init_ops - functions for platform specific setup
11850@@ -101,7 +101,7 @@ struct x86_init_ops {
11851 struct x86_init_oem oem;
11852 struct x86_init_paging paging;
11853 struct x86_init_timers timers;
11854-};
11855+} __no_const;
11856
11857 /**
11858 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11859@@ -109,7 +109,7 @@ struct x86_init_ops {
11860 */
11861 struct x86_cpuinit_ops {
11862 void (*setup_percpu_clockev)(void);
11863-};
11864+} __no_const;
11865
11866 /**
11867 * struct x86_platform_ops - platform specific runtime functions
11868@@ -121,7 +121,7 @@ struct x86_platform_ops {
11869 unsigned long (*calibrate_tsc)(void);
11870 unsigned long (*get_wallclock)(void);
11871 int (*set_wallclock)(unsigned long nowtime);
11872-};
11873+} __no_const;
11874
11875 extern struct x86_init_ops x86_init;
11876 extern struct x86_cpuinit_ops x86_cpuinit;
11877diff -urNp linux-2.6.32.48/arch/x86/include/asm/xsave.h linux-2.6.32.48/arch/x86/include/asm/xsave.h
11878--- linux-2.6.32.48/arch/x86/include/asm/xsave.h 2011-11-08 19:02:43.000000000 -0500
11879+++ linux-2.6.32.48/arch/x86/include/asm/xsave.h 2011-11-15 19:59:42.000000000 -0500
11880@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11881 static inline int xsave_user(struct xsave_struct __user *buf)
11882 {
11883 int err;
11884+
11885+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11886+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11887+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11888+#endif
11889+
11890 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11891 "2:\n"
11892 ".section .fixup,\"ax\"\n"
11893@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsav
11894 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11895 {
11896 int err;
11897- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11898+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11899 u32 lmask = mask;
11900 u32 hmask = mask >> 32;
11901
11902+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11903+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11904+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11905+#endif
11906+
11907 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11908 "2:\n"
11909 ".section .fixup,\"ax\"\n"
11910diff -urNp linux-2.6.32.48/arch/x86/Kconfig linux-2.6.32.48/arch/x86/Kconfig
11911--- linux-2.6.32.48/arch/x86/Kconfig 2011-11-08 19:02:43.000000000 -0500
11912+++ linux-2.6.32.48/arch/x86/Kconfig 2011-11-15 19:59:42.000000000 -0500
11913@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11914
11915 config X86_32_LAZY_GS
11916 def_bool y
11917- depends on X86_32 && !CC_STACKPROTECTOR
11918+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11919
11920 config KTIME_SCALAR
11921 def_bool X86_32
11922@@ -1008,7 +1008,7 @@ choice
11923
11924 config NOHIGHMEM
11925 bool "off"
11926- depends on !X86_NUMAQ
11927+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11928 ---help---
11929 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11930 However, the address space of 32-bit x86 processors is only 4
11931@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11932
11933 config HIGHMEM4G
11934 bool "4GB"
11935- depends on !X86_NUMAQ
11936+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11937 ---help---
11938 Select this if you have a 32-bit processor and between 1 and 4
11939 gigabytes of physical RAM.
11940@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11941 hex
11942 default 0xB0000000 if VMSPLIT_3G_OPT
11943 default 0x80000000 if VMSPLIT_2G
11944- default 0x78000000 if VMSPLIT_2G_OPT
11945+ default 0x70000000 if VMSPLIT_2G_OPT
11946 default 0x40000000 if VMSPLIT_1G
11947 default 0xC0000000
11948 depends on X86_32
11949@@ -1460,6 +1460,7 @@ config SECCOMP
11950
11951 config CC_STACKPROTECTOR
11952 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11953+ depends on X86_64 || !PAX_MEMORY_UDEREF
11954 ---help---
11955 This option turns on the -fstack-protector GCC feature. This
11956 feature puts, at the beginning of functions, a canary value on
11957@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11958 config PHYSICAL_START
11959 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11960 default "0x1000000"
11961+ range 0x400000 0x40000000
11962 ---help---
11963 This gives the physical address where the kernel is loaded.
11964
11965@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11966 hex
11967 prompt "Alignment value to which kernel should be aligned" if X86_32
11968 default "0x1000000"
11969+ range 0x400000 0x1000000 if PAX_KERNEXEC
11970 range 0x2000 0x1000000
11971 ---help---
11972 This value puts the alignment restrictions on physical address
11973@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11974 Say N if you want to disable CPU hotplug.
11975
11976 config COMPAT_VDSO
11977- def_bool y
11978+ def_bool n
11979 prompt "Compat VDSO support"
11980 depends on X86_32 || IA32_EMULATION
11981+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11982 ---help---
11983 Map the 32-bit VDSO to the predictable old-style address too.
11984 ---help---
11985diff -urNp linux-2.6.32.48/arch/x86/Kconfig.cpu linux-2.6.32.48/arch/x86/Kconfig.cpu
11986--- linux-2.6.32.48/arch/x86/Kconfig.cpu 2011-11-08 19:02:43.000000000 -0500
11987+++ linux-2.6.32.48/arch/x86/Kconfig.cpu 2011-11-15 19:59:42.000000000 -0500
11988@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11989
11990 config X86_F00F_BUG
11991 def_bool y
11992- depends on M586MMX || M586TSC || M586 || M486 || M386
11993+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11994
11995 config X86_WP_WORKS_OK
11996 def_bool y
11997@@ -360,7 +360,7 @@ config X86_POPAD_OK
11998
11999 config X86_ALIGNMENT_16
12000 def_bool y
12001- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12002+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12003
12004 config X86_INTEL_USERCOPY
12005 def_bool y
12006@@ -406,7 +406,7 @@ config X86_CMPXCHG64
12007 # generates cmov.
12008 config X86_CMOV
12009 def_bool y
12010- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
12011+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
12012
12013 config X86_MINIMUM_CPU_FAMILY
12014 int
12015diff -urNp linux-2.6.32.48/arch/x86/Kconfig.debug linux-2.6.32.48/arch/x86/Kconfig.debug
12016--- linux-2.6.32.48/arch/x86/Kconfig.debug 2011-11-08 19:02:43.000000000 -0500
12017+++ linux-2.6.32.48/arch/x86/Kconfig.debug 2011-11-15 19:59:42.000000000 -0500
12018@@ -99,7 +99,7 @@ config X86_PTDUMP
12019 config DEBUG_RODATA
12020 bool "Write protect kernel read-only data structures"
12021 default y
12022- depends on DEBUG_KERNEL
12023+ depends on DEBUG_KERNEL && BROKEN
12024 ---help---
12025 Mark the kernel read-only data as write-protected in the pagetables,
12026 in order to catch accidental (and incorrect) writes to such const
12027diff -urNp linux-2.6.32.48/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.48/arch/x86/kernel/acpi/realmode/Makefile
12028--- linux-2.6.32.48/arch/x86/kernel/acpi/realmode/Makefile 2011-11-08 19:02:43.000000000 -0500
12029+++ linux-2.6.32.48/arch/x86/kernel/acpi/realmode/Makefile 2011-11-15 19:59:42.000000000 -0500
12030@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
12031 $(call cc-option, -fno-stack-protector) \
12032 $(call cc-option, -mpreferred-stack-boundary=2)
12033 KBUILD_CFLAGS += $(call cc-option, -m32)
12034+ifdef CONSTIFY_PLUGIN
12035+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
12036+endif
12037 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12038 GCOV_PROFILE := n
12039
12040diff -urNp linux-2.6.32.48/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.48/arch/x86/kernel/acpi/realmode/wakeup.S
12041--- linux-2.6.32.48/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-08 19:02:43.000000000 -0500
12042+++ linux-2.6.32.48/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-15 19:59:42.000000000 -0500
12043@@ -91,6 +91,9 @@ _start:
12044 /* Do any other stuff... */
12045
12046 #ifndef CONFIG_64BIT
12047+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
12048+ call verify_cpu
12049+
12050 /* This could also be done in C code... */
12051 movl pmode_cr3, %eax
12052 movl %eax, %cr3
12053@@ -104,7 +107,7 @@ _start:
12054 movl %eax, %ecx
12055 orl %edx, %ecx
12056 jz 1f
12057- movl $0xc0000080, %ecx
12058+ mov $MSR_EFER, %ecx
12059 wrmsr
12060 1:
12061
12062@@ -114,6 +117,7 @@ _start:
12063 movl pmode_cr0, %eax
12064 movl %eax, %cr0
12065 jmp pmode_return
12066+# include "../../verify_cpu.S"
12067 #else
12068 pushw $0
12069 pushw trampoline_segment
12070diff -urNp linux-2.6.32.48/arch/x86/kernel/acpi/sleep.c linux-2.6.32.48/arch/x86/kernel/acpi/sleep.c
12071--- linux-2.6.32.48/arch/x86/kernel/acpi/sleep.c 2011-11-08 19:02:43.000000000 -0500
12072+++ linux-2.6.32.48/arch/x86/kernel/acpi/sleep.c 2011-11-15 19:59:42.000000000 -0500
12073@@ -11,11 +11,12 @@
12074 #include <linux/cpumask.h>
12075 #include <asm/segment.h>
12076 #include <asm/desc.h>
12077+#include <asm/e820.h>
12078
12079 #include "realmode/wakeup.h"
12080 #include "sleep.h"
12081
12082-unsigned long acpi_wakeup_address;
12083+unsigned long acpi_wakeup_address = 0x2000;
12084 unsigned long acpi_realmode_flags;
12085
12086 /* address in low memory of the wakeup routine. */
12087@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
12088 #else /* CONFIG_64BIT */
12089 header->trampoline_segment = setup_trampoline() >> 4;
12090 #ifdef CONFIG_SMP
12091- stack_start.sp = temp_stack + sizeof(temp_stack);
12092+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
12093+
12094+ pax_open_kernel();
12095 early_gdt_descr.address =
12096 (unsigned long)get_cpu_gdt_table(smp_processor_id());
12097+ pax_close_kernel();
12098+
12099 initial_gs = per_cpu_offset(smp_processor_id());
12100 #endif
12101 initial_code = (unsigned long)wakeup_long64;
12102@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
12103 return;
12104 }
12105
12106- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
12107-
12108- if (!acpi_realmode) {
12109- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
12110- return;
12111- }
12112-
12113- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
12114+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
12115+	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
12116 }
12117
12118
12119diff -urNp linux-2.6.32.48/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.48/arch/x86/kernel/acpi/wakeup_32.S
12120--- linux-2.6.32.48/arch/x86/kernel/acpi/wakeup_32.S 2011-11-08 19:02:43.000000000 -0500
12121+++ linux-2.6.32.48/arch/x86/kernel/acpi/wakeup_32.S 2011-11-15 19:59:42.000000000 -0500
12122@@ -30,13 +30,11 @@ wakeup_pmode_return:
12123 # and restore the stack ... but you need gdt for this to work
12124 movl saved_context_esp, %esp
12125
12126- movl %cs:saved_magic, %eax
12127- cmpl $0x12345678, %eax
12128+ cmpl $0x12345678, saved_magic
12129 jne bogus_magic
12130
12131 # jump to place where we left off
12132- movl saved_eip, %eax
12133- jmp *%eax
12134+ jmp *(saved_eip)
12135
12136 bogus_magic:
12137 jmp bogus_magic
12138diff -urNp linux-2.6.32.48/arch/x86/kernel/alternative.c linux-2.6.32.48/arch/x86/kernel/alternative.c
12139--- linux-2.6.32.48/arch/x86/kernel/alternative.c 2011-11-08 19:02:43.000000000 -0500
12140+++ linux-2.6.32.48/arch/x86/kernel/alternative.c 2011-11-15 19:59:42.000000000 -0500
12141@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
12142
12143 BUG_ON(p->len > MAX_PATCH_LEN);
12144 /* prep the buffer with the original instructions */
12145- memcpy(insnbuf, p->instr, p->len);
12146+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
12147 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
12148 (unsigned long)p->instr, p->len);
12149
12150@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
12151 if (smp_alt_once)
12152 free_init_pages("SMP alternatives",
12153 (unsigned long)__smp_locks,
12154- (unsigned long)__smp_locks_end);
12155+ PAGE_ALIGN((unsigned long)__smp_locks_end));
12156
12157 restart_nmi();
12158 }
12159@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
12160 * instructions. And on the local CPU you need to be protected again NMI or MCE
12161 * handlers seeing an inconsistent instruction while you patch.
12162 */
12163-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
12164+static void *__kprobes text_poke_early(void *addr, const void *opcode,
12165 size_t len)
12166 {
12167 unsigned long flags;
12168 local_irq_save(flags);
12169- memcpy(addr, opcode, len);
12170+
12171+ pax_open_kernel();
12172+ memcpy(ktla_ktva(addr), opcode, len);
12173 sync_core();
12174+ pax_close_kernel();
12175+
12176 local_irq_restore(flags);
12177 /* Could also do a CLFLUSH here to speed up CPU recovery; but
12178 that causes hangs on some VIA CPUs. */
12179@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
12180 */
12181 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
12182 {
12183- unsigned long flags;
12184- char *vaddr;
12185+ unsigned char *vaddr = ktla_ktva(addr);
12186 struct page *pages[2];
12187- int i;
12188+ size_t i;
12189
12190 if (!core_kernel_text((unsigned long)addr)) {
12191- pages[0] = vmalloc_to_page(addr);
12192- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
12193+ pages[0] = vmalloc_to_page(vaddr);
12194+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
12195 } else {
12196- pages[0] = virt_to_page(addr);
12197+ pages[0] = virt_to_page(vaddr);
12198 WARN_ON(!PageReserved(pages[0]));
12199- pages[1] = virt_to_page(addr + PAGE_SIZE);
12200+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
12201 }
12202 BUG_ON(!pages[0]);
12203- local_irq_save(flags);
12204- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
12205- if (pages[1])
12206- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
12207- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
12208- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
12209- clear_fixmap(FIX_TEXT_POKE0);
12210- if (pages[1])
12211- clear_fixmap(FIX_TEXT_POKE1);
12212- local_flush_tlb();
12213- sync_core();
12214- /* Could also do a CLFLUSH here to speed up CPU recovery; but
12215- that causes hangs on some VIA CPUs. */
12216+ text_poke_early(addr, opcode, len);
12217 for (i = 0; i < len; i++)
12218- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
12219- local_irq_restore(flags);
12220+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
12221 return addr;
12222 }
12223diff -urNp linux-2.6.32.48/arch/x86/kernel/amd_iommu.c linux-2.6.32.48/arch/x86/kernel/amd_iommu.c
12224--- linux-2.6.32.48/arch/x86/kernel/amd_iommu.c 2011-11-08 19:02:43.000000000 -0500
12225+++ linux-2.6.32.48/arch/x86/kernel/amd_iommu.c 2011-11-15 19:59:42.000000000 -0500
12226@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
12227 }
12228 }
12229
12230-static struct dma_map_ops amd_iommu_dma_ops = {
12231+static const struct dma_map_ops amd_iommu_dma_ops = {
12232 .alloc_coherent = alloc_coherent,
12233 .free_coherent = free_coherent,
12234 .map_page = map_page,
12235diff -urNp linux-2.6.32.48/arch/x86/kernel/apic/apic.c linux-2.6.32.48/arch/x86/kernel/apic/apic.c
12236--- linux-2.6.32.48/arch/x86/kernel/apic/apic.c 2011-11-08 19:02:43.000000000 -0500
12237+++ linux-2.6.32.48/arch/x86/kernel/apic/apic.c 2011-11-15 19:59:42.000000000 -0500
12238@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
12239 /*
12240 * Debug level, exported for io_apic.c
12241 */
12242-unsigned int apic_verbosity;
12243+int apic_verbosity;
12244
12245 int pic_mode;
12246
12247@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
12248 apic_write(APIC_ESR, 0);
12249 v1 = apic_read(APIC_ESR);
12250 ack_APIC_irq();
12251- atomic_inc(&irq_err_count);
12252+ atomic_inc_unchecked(&irq_err_count);
12253
12254 /*
12255 * Here is what the APIC error bits mean:
12256@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
12257 u16 *bios_cpu_apicid;
12258 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
12259
12260+ pax_track_stack();
12261+
12262 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
12263 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
12264
12265diff -urNp linux-2.6.32.48/arch/x86/kernel/apic/io_apic.c linux-2.6.32.48/arch/x86/kernel/apic/io_apic.c
12266--- linux-2.6.32.48/arch/x86/kernel/apic/io_apic.c 2011-11-08 19:02:43.000000000 -0500
12267+++ linux-2.6.32.48/arch/x86/kernel/apic/io_apic.c 2011-11-15 19:59:42.000000000 -0500
12268@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12269 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12270 GFP_ATOMIC);
12271 if (!ioapic_entries)
12272- return 0;
12273+ return NULL;
12274
12275 for (apic = 0; apic < nr_ioapics; apic++) {
12276 ioapic_entries[apic] =
12277@@ -733,7 +733,7 @@ nomem:
12278 kfree(ioapic_entries[apic]);
12279 kfree(ioapic_entries);
12280
12281- return 0;
12282+ return NULL;
12283 }
12284
12285 /*
12286@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12287 }
12288 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12289
12290-void lock_vector_lock(void)
12291+void lock_vector_lock(void) __acquires(vector_lock)
12292 {
12293 /* Used to the online set of cpus does not change
12294 * during assign_irq_vector.
12295@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12296 spin_lock(&vector_lock);
12297 }
12298
12299-void unlock_vector_lock(void)
12300+void unlock_vector_lock(void) __releases(vector_lock)
12301 {
12302 spin_unlock(&vector_lock);
12303 }
12304@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12305 ack_APIC_irq();
12306 }
12307
12308-atomic_t irq_mis_count;
12309+atomic_unchecked_t irq_mis_count;
12310
12311 static void ack_apic_level(unsigned int irq)
12312 {
12313@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12314
12315 /* Tail end of version 0x11 I/O APIC bug workaround */
12316 if (!(v & (1 << (i & 0x1f)))) {
12317- atomic_inc(&irq_mis_count);
12318+ atomic_inc_unchecked(&irq_mis_count);
12319 spin_lock(&ioapic_lock);
12320 __mask_and_edge_IO_APIC_irq(cfg);
12321 __unmask_and_level_IO_APIC_irq(cfg);
12322diff -urNp linux-2.6.32.48/arch/x86/kernel/apm_32.c linux-2.6.32.48/arch/x86/kernel/apm_32.c
12323--- linux-2.6.32.48/arch/x86/kernel/apm_32.c 2011-11-08 19:02:43.000000000 -0500
12324+++ linux-2.6.32.48/arch/x86/kernel/apm_32.c 2011-11-15 19:59:42.000000000 -0500
12325@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12326 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12327 * even though they are called in protected mode.
12328 */
12329-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12330+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12331 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12332
12333 static const char driver_version[] = "1.16ac"; /* no spaces */
12334@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12335 BUG_ON(cpu != 0);
12336 gdt = get_cpu_gdt_table(cpu);
12337 save_desc_40 = gdt[0x40 / 8];
12338+
12339+ pax_open_kernel();
12340 gdt[0x40 / 8] = bad_bios_desc;
12341+ pax_close_kernel();
12342
12343 apm_irq_save(flags);
12344 APM_DO_SAVE_SEGS;
12345@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12346 &call->esi);
12347 APM_DO_RESTORE_SEGS;
12348 apm_irq_restore(flags);
12349+
12350+ pax_open_kernel();
12351 gdt[0x40 / 8] = save_desc_40;
12352+ pax_close_kernel();
12353+
12354 put_cpu();
12355
12356 return call->eax & 0xff;
12357@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12358 BUG_ON(cpu != 0);
12359 gdt = get_cpu_gdt_table(cpu);
12360 save_desc_40 = gdt[0x40 / 8];
12361+
12362+ pax_open_kernel();
12363 gdt[0x40 / 8] = bad_bios_desc;
12364+ pax_close_kernel();
12365
12366 apm_irq_save(flags);
12367 APM_DO_SAVE_SEGS;
12368@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12369 &call->eax);
12370 APM_DO_RESTORE_SEGS;
12371 apm_irq_restore(flags);
12372+
12373+ pax_open_kernel();
12374 gdt[0x40 / 8] = save_desc_40;
12375+ pax_close_kernel();
12376+
12377 put_cpu();
12378 return error;
12379 }
12380@@ -975,7 +989,7 @@ recalc:
12381
12382 static void apm_power_off(void)
12383 {
12384- unsigned char po_bios_call[] = {
12385+ const unsigned char po_bios_call[] = {
12386 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12387 0x8e, 0xd0, /* movw ax,ss */
12388 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12389@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12390 * code to that CPU.
12391 */
12392 gdt = get_cpu_gdt_table(0);
12393+
12394+ pax_open_kernel();
12395 set_desc_base(&gdt[APM_CS >> 3],
12396 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12397 set_desc_base(&gdt[APM_CS_16 >> 3],
12398 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12399 set_desc_base(&gdt[APM_DS >> 3],
12400 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12401+ pax_close_kernel();
12402
12403 proc_create("apm", 0, NULL, &apm_file_ops);
12404
12405diff -urNp linux-2.6.32.48/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.48/arch/x86/kernel/asm-offsets_32.c
12406--- linux-2.6.32.48/arch/x86/kernel/asm-offsets_32.c 2011-11-08 19:02:43.000000000 -0500
12407+++ linux-2.6.32.48/arch/x86/kernel/asm-offsets_32.c 2011-11-15 19:59:42.000000000 -0500
12408@@ -51,7 +51,6 @@ void foo(void)
12409 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12410 BLANK();
12411
12412- OFFSET(TI_task, thread_info, task);
12413 OFFSET(TI_exec_domain, thread_info, exec_domain);
12414 OFFSET(TI_flags, thread_info, flags);
12415 OFFSET(TI_status, thread_info, status);
12416@@ -60,6 +59,8 @@ void foo(void)
12417 OFFSET(TI_restart_block, thread_info, restart_block);
12418 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12419 OFFSET(TI_cpu, thread_info, cpu);
12420+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12421+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12422 BLANK();
12423
12424 OFFSET(GDS_size, desc_ptr, size);
12425@@ -99,6 +100,7 @@ void foo(void)
12426
12427 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12428 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12429+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12430 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12431 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12432 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12433@@ -115,6 +117,11 @@ void foo(void)
12434 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12435 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12436 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12437+
12438+#ifdef CONFIG_PAX_KERNEXEC
12439+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12440+#endif
12441+
12442 #endif
12443
12444 #ifdef CONFIG_XEN
12445diff -urNp linux-2.6.32.48/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.48/arch/x86/kernel/asm-offsets_64.c
12446--- linux-2.6.32.48/arch/x86/kernel/asm-offsets_64.c 2011-11-08 19:02:43.000000000 -0500
12447+++ linux-2.6.32.48/arch/x86/kernel/asm-offsets_64.c 2011-11-15 19:59:42.000000000 -0500
12448@@ -44,6 +44,8 @@ int main(void)
12449 ENTRY(addr_limit);
12450 ENTRY(preempt_count);
12451 ENTRY(status);
12452+ ENTRY(lowest_stack);
12453+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12454 #ifdef CONFIG_IA32_EMULATION
12455 ENTRY(sysenter_return);
12456 #endif
12457@@ -63,6 +65,18 @@ int main(void)
12458 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12459 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12460 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12461+
12462+#ifdef CONFIG_PAX_KERNEXEC
12463+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12464+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12465+#endif
12466+
12467+#ifdef CONFIG_PAX_MEMORY_UDEREF
12468+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12469+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12470+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12471+#endif
12472+
12473 #endif
12474
12475
12476@@ -115,6 +129,7 @@ int main(void)
12477 ENTRY(cr8);
12478 BLANK();
12479 #undef ENTRY
12480+ DEFINE(TSS_size, sizeof(struct tss_struct));
12481 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12482 BLANK();
12483 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12484@@ -130,6 +145,7 @@ int main(void)
12485
12486 BLANK();
12487 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12488+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12489 #ifdef CONFIG_XEN
12490 BLANK();
12491 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12492diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/amd.c linux-2.6.32.48/arch/x86/kernel/cpu/amd.c
12493--- linux-2.6.32.48/arch/x86/kernel/cpu/amd.c 2011-11-08 19:02:43.000000000 -0500
12494+++ linux-2.6.32.48/arch/x86/kernel/cpu/amd.c 2011-11-15 19:59:42.000000000 -0500
12495@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12496 unsigned int size)
12497 {
12498 /* AMD errata T13 (order #21922) */
12499- if ((c->x86 == 6)) {
12500+ if (c->x86 == 6) {
12501 /* Duron Rev A0 */
12502 if (c->x86_model == 3 && c->x86_mask == 0)
12503 size = 64;
12504diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/common.c linux-2.6.32.48/arch/x86/kernel/cpu/common.c
12505--- linux-2.6.32.48/arch/x86/kernel/cpu/common.c 2011-11-08 19:02:43.000000000 -0500
12506+++ linux-2.6.32.48/arch/x86/kernel/cpu/common.c 2011-11-15 19:59:42.000000000 -0500
12507@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12508
12509 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12510
12511-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12512-#ifdef CONFIG_X86_64
12513- /*
12514- * We need valid kernel segments for data and code in long mode too
12515- * IRET will check the segment types kkeil 2000/10/28
12516- * Also sysret mandates a special GDT layout
12517- *
12518- * TLS descriptors are currently at a different place compared to i386.
12519- * Hopefully nobody expects them at a fixed place (Wine?)
12520- */
12521- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12522- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12523- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12524- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12525- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12526- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12527-#else
12528- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12529- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12530- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12531- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12532- /*
12533- * Segments used for calling PnP BIOS have byte granularity.
12534- * They code segments and data segments have fixed 64k limits,
12535- * the transfer segment sizes are set at run time.
12536- */
12537- /* 32-bit code */
12538- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12539- /* 16-bit code */
12540- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12541- /* 16-bit data */
12542- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12543- /* 16-bit data */
12544- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12545- /* 16-bit data */
12546- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12547- /*
12548- * The APM segments have byte granularity and their bases
12549- * are set at run time. All have 64k limits.
12550- */
12551- /* 32-bit code */
12552- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12553- /* 16-bit code */
12554- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12555- /* data */
12556- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12557-
12558- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12559- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12560- GDT_STACK_CANARY_INIT
12561-#endif
12562-} };
12563-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12564-
12565 static int __init x86_xsave_setup(char *s)
12566 {
12567 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12568@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12569 {
12570 struct desc_ptr gdt_descr;
12571
12572- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12573+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12574 gdt_descr.size = GDT_SIZE - 1;
12575 load_gdt(&gdt_descr);
12576 /* Reload the per-cpu base */
12577@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12578 /* Filter out anything that depends on CPUID levels we don't have */
12579 filter_cpuid_features(c, true);
12580
12581+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12582+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12583+#endif
12584+
12585 /* If the model name is still unset, do table lookup. */
12586 if (!c->x86_model_id[0]) {
12587 const char *p;
12588@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12589 }
12590 __setup("clearcpuid=", setup_disablecpuid);
12591
12592+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12593+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12594+
12595 #ifdef CONFIG_X86_64
12596 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12597
12598@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12599 EXPORT_PER_CPU_SYMBOL(current_task);
12600
12601 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12602- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12603+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12604 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12605
12606 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12607@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12608 {
12609 memset(regs, 0, sizeof(struct pt_regs));
12610 regs->fs = __KERNEL_PERCPU;
12611- regs->gs = __KERNEL_STACK_CANARY;
12612+ savesegment(gs, regs->gs);
12613
12614 return regs;
12615 }
12616@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12617 int i;
12618
12619 cpu = stack_smp_processor_id();
12620- t = &per_cpu(init_tss, cpu);
12621+ t = init_tss + cpu;
12622 orig_ist = &per_cpu(orig_ist, cpu);
12623
12624 #ifdef CONFIG_NUMA
12625@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12626 switch_to_new_gdt(cpu);
12627 loadsegment(fs, 0);
12628
12629- load_idt((const struct desc_ptr *)&idt_descr);
12630+ load_idt(&idt_descr);
12631
12632 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12633 syscall_init();
12634@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12635 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12636 barrier();
12637
12638- check_efer();
12639 if (cpu != 0)
12640 enable_x2apic();
12641
12642@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12643 {
12644 int cpu = smp_processor_id();
12645 struct task_struct *curr = current;
12646- struct tss_struct *t = &per_cpu(init_tss, cpu);
12647+ struct tss_struct *t = init_tss + cpu;
12648 struct thread_struct *thread = &curr->thread;
12649
12650 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12651diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/intel.c linux-2.6.32.48/arch/x86/kernel/cpu/intel.c
12652--- linux-2.6.32.48/arch/x86/kernel/cpu/intel.c 2011-11-08 19:02:43.000000000 -0500
12653+++ linux-2.6.32.48/arch/x86/kernel/cpu/intel.c 2011-11-15 19:59:42.000000000 -0500
12654@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12655 * Update the IDT descriptor and reload the IDT so that
12656 * it uses the read-only mapped virtual address.
12657 */
12658- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12659+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12660 load_idt(&idt_descr);
12661 }
12662 #endif
12663diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.48/arch/x86/kernel/cpu/intel_cacheinfo.c
12664--- linux-2.6.32.48/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-11-08 19:02:43.000000000 -0500
12665+++ linux-2.6.32.48/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-11-15 19:59:42.000000000 -0500
12666@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12667 return ret;
12668 }
12669
12670-static struct sysfs_ops sysfs_ops = {
12671+static const struct sysfs_ops sysfs_ops = {
12672 .show = show,
12673 .store = store,
12674 };
12675diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/Makefile linux-2.6.32.48/arch/x86/kernel/cpu/Makefile
12676--- linux-2.6.32.48/arch/x86/kernel/cpu/Makefile 2011-11-08 19:02:43.000000000 -0500
12677+++ linux-2.6.32.48/arch/x86/kernel/cpu/Makefile 2011-11-15 19:59:42.000000000 -0500
12678@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12679 CFLAGS_REMOVE_common.o = -pg
12680 endif
12681
12682-# Make sure load_percpu_segment has no stackprotector
12683-nostackp := $(call cc-option, -fno-stack-protector)
12684-CFLAGS_common.o := $(nostackp)
12685-
12686 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12687 obj-y += proc.o capflags.o powerflags.o common.o
12688 obj-y += vmware.o hypervisor.o sched.o
12689diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce_amd.c
12690--- linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-11-08 19:02:43.000000000 -0500
12691+++ linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-11-15 19:59:42.000000000 -0500
12692@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12693 return ret;
12694 }
12695
12696-static struct sysfs_ops threshold_ops = {
12697+static const struct sysfs_ops threshold_ops = {
12698 .show = show,
12699 .store = store,
12700 };
12701diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce.c
12702--- linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-08 19:02:43.000000000 -0500
12703+++ linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-15 19:59:42.000000000 -0500
12704@@ -43,6 +43,7 @@
12705 #include <asm/ipi.h>
12706 #include <asm/mce.h>
12707 #include <asm/msr.h>
12708+#include <asm/local.h>
12709
12710 #include "mce-internal.h"
12711
12712@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12713 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12714 m->cs, m->ip);
12715
12716- if (m->cs == __KERNEL_CS)
12717+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12718 print_symbol("{%s}", m->ip);
12719 pr_cont("\n");
12720 }
12721@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12722
12723 #define PANIC_TIMEOUT 5 /* 5 seconds */
12724
12725-static atomic_t mce_paniced;
12726+static atomic_unchecked_t mce_paniced;
12727
12728 static int fake_panic;
12729-static atomic_t mce_fake_paniced;
12730+static atomic_unchecked_t mce_fake_paniced;
12731
12732 /* Panic in progress. Enable interrupts and wait for final IPI */
12733 static void wait_for_panic(void)
12734@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12735 /*
12736 * Make sure only one CPU runs in machine check panic
12737 */
12738- if (atomic_inc_return(&mce_paniced) > 1)
12739+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12740 wait_for_panic();
12741 barrier();
12742
12743@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12744 console_verbose();
12745 } else {
12746 /* Don't log too much for fake panic */
12747- if (atomic_inc_return(&mce_fake_paniced) > 1)
12748+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12749 return;
12750 }
12751 print_mce_head();
12752@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12753 * might have been modified by someone else.
12754 */
12755 rmb();
12756- if (atomic_read(&mce_paniced))
12757+ if (atomic_read_unchecked(&mce_paniced))
12758 wait_for_panic();
12759 if (!monarch_timeout)
12760 goto out;
12761@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12762 */
12763
12764 static DEFINE_SPINLOCK(mce_state_lock);
12765-static int open_count; /* #times opened */
12766+static local_t open_count; /* #times opened */
12767 static int open_exclu; /* already open exclusive? */
12768
12769 static int mce_open(struct inode *inode, struct file *file)
12770 {
12771 spin_lock(&mce_state_lock);
12772
12773- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12774+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12775 spin_unlock(&mce_state_lock);
12776
12777 return -EBUSY;
12778@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12779
12780 if (file->f_flags & O_EXCL)
12781 open_exclu = 1;
12782- open_count++;
12783+ local_inc(&open_count);
12784
12785 spin_unlock(&mce_state_lock);
12786
12787@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12788 {
12789 spin_lock(&mce_state_lock);
12790
12791- open_count--;
12792+ local_dec(&open_count);
12793 open_exclu = 0;
12794
12795 spin_unlock(&mce_state_lock);
12796@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12797 static void mce_reset(void)
12798 {
12799 cpu_missing = 0;
12800- atomic_set(&mce_fake_paniced, 0);
12801+ atomic_set_unchecked(&mce_fake_paniced, 0);
12802 atomic_set(&mce_executing, 0);
12803 atomic_set(&mce_callin, 0);
12804 atomic_set(&global_nwo, 0);
12805diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce-inject.c
12806--- linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-08 19:02:43.000000000 -0500
12807+++ linux-2.6.32.48/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-15 19:59:42.000000000 -0500
12808@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12809 static int inject_init(void)
12810 {
12811 printk(KERN_INFO "Machine check injector initialized\n");
12812- mce_chrdev_ops.write = mce_write;
12813+ pax_open_kernel();
12814+ *(void **)&mce_chrdev_ops.write = mce_write;
12815+ pax_close_kernel();
12816 register_die_notifier(&mce_raise_nb);
12817 return 0;
12818 }
12819diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/amd.c
12820--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/amd.c 2011-11-08 19:02:43.000000000 -0500
12821+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/amd.c 2011-11-15 19:59:42.000000000 -0500
12822@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12823 return 0;
12824 }
12825
12826-static struct mtrr_ops amd_mtrr_ops = {
12827+static const struct mtrr_ops amd_mtrr_ops = {
12828 .vendor = X86_VENDOR_AMD,
12829 .set = amd_set_mtrr,
12830 .get = amd_get_mtrr,
12831diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/centaur.c
12832--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/centaur.c 2011-11-08 19:02:43.000000000 -0500
12833+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/centaur.c 2011-11-15 19:59:42.000000000 -0500
12834@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12835 return 0;
12836 }
12837
12838-static struct mtrr_ops centaur_mtrr_ops = {
12839+static const struct mtrr_ops centaur_mtrr_ops = {
12840 .vendor = X86_VENDOR_CENTAUR,
12841 .set = centaur_set_mcr,
12842 .get = centaur_get_mcr,
12843diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/cyrix.c
12844--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-11-08 19:02:43.000000000 -0500
12845+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-11-15 19:59:42.000000000 -0500
12846@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12847 post_set();
12848 }
12849
12850-static struct mtrr_ops cyrix_mtrr_ops = {
12851+static const struct mtrr_ops cyrix_mtrr_ops = {
12852 .vendor = X86_VENDOR_CYRIX,
12853 .set_all = cyrix_set_all,
12854 .set = cyrix_set_arr,
12855diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/generic.c
12856--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/generic.c 2011-11-08 19:02:43.000000000 -0500
12857+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/generic.c 2011-11-15 19:59:42.000000000 -0500
12858@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12859 /*
12860 * Generic structure...
12861 */
12862-struct mtrr_ops generic_mtrr_ops = {
12863+const struct mtrr_ops generic_mtrr_ops = {
12864 .use_intel_if = 1,
12865 .set_all = generic_set_all,
12866 .get = generic_get_mtrr,
12867diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/main.c
12868--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/main.c 2011-11-08 19:02:43.000000000 -0500
12869+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/main.c 2011-11-15 19:59:42.000000000 -0500
12870@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12871 u64 size_or_mask, size_and_mask;
12872 static bool mtrr_aps_delayed_init;
12873
12874-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12875+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12876
12877-struct mtrr_ops *mtrr_if;
12878+const struct mtrr_ops *mtrr_if;
12879
12880 static void set_mtrr(unsigned int reg, unsigned long base,
12881 unsigned long size, mtrr_type type);
12882
12883-void set_mtrr_ops(struct mtrr_ops *ops)
12884+void set_mtrr_ops(const struct mtrr_ops *ops)
12885 {
12886 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12887 mtrr_ops[ops->vendor] = ops;
12888diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/mtrr.h
12889--- linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-08 19:02:43.000000000 -0500
12890+++ linux-2.6.32.48/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-15 19:59:42.000000000 -0500
12891@@ -25,14 +25,14 @@ struct mtrr_ops {
12892 int (*validate_add_page)(unsigned long base, unsigned long size,
12893 unsigned int type);
12894 int (*have_wrcomb)(void);
12895-};
12896+} __do_const;
12897
12898 extern int generic_get_free_region(unsigned long base, unsigned long size,
12899 int replace_reg);
12900 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12901 unsigned int type);
12902
12903-extern struct mtrr_ops generic_mtrr_ops;
12904+extern const struct mtrr_ops generic_mtrr_ops;
12905
12906 extern int positive_have_wrcomb(void);
12907
12908@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12909 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12910 void get_mtrr_state(void);
12911
12912-extern void set_mtrr_ops(struct mtrr_ops *ops);
12913+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12914
12915 extern u64 size_or_mask, size_and_mask;
12916-extern struct mtrr_ops *mtrr_if;
12917+extern const struct mtrr_ops *mtrr_if;
12918
12919 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12920 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12921diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.48/arch/x86/kernel/cpu/perfctr-watchdog.c
12922--- linux-2.6.32.48/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-11-08 19:02:43.000000000 -0500
12923+++ linux-2.6.32.48/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-11-15 19:59:42.000000000 -0500
12924@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12925
12926 /* Interface defining a CPU specific perfctr watchdog */
12927 struct wd_ops {
12928- int (*reserve)(void);
12929- void (*unreserve)(void);
12930- int (*setup)(unsigned nmi_hz);
12931- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12932- void (*stop)(void);
12933+ int (* const reserve)(void);
12934+ void (* const unreserve)(void);
12935+ int (* const setup)(unsigned nmi_hz);
12936+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12937+ void (* const stop)(void);
12938 unsigned perfctr;
12939 unsigned evntsel;
12940 u64 checkbit;
12941@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12942 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12943 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12944
12945+/* cannot be const */
12946 static struct wd_ops intel_arch_wd_ops;
12947
12948 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12949@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12950 return 1;
12951 }
12952
12953+/* cannot be const */
12954 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12955 .reserve = single_msr_reserve,
12956 .unreserve = single_msr_unreserve,
12957diff -urNp linux-2.6.32.48/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.48/arch/x86/kernel/cpu/perf_event.c
12958--- linux-2.6.32.48/arch/x86/kernel/cpu/perf_event.c 2011-11-08 19:02:43.000000000 -0500
12959+++ linux-2.6.32.48/arch/x86/kernel/cpu/perf_event.c 2011-11-15 19:59:42.000000000 -0500
12960@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12961 * count to the generic event atomically:
12962 */
12963 again:
12964- prev_raw_count = atomic64_read(&hwc->prev_count);
12965+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12966 rdmsrl(hwc->event_base + idx, new_raw_count);
12967
12968- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12969+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12970 new_raw_count) != prev_raw_count)
12971 goto again;
12972
12973@@ -741,7 +741,7 @@ again:
12974 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12975 delta >>= shift;
12976
12977- atomic64_add(delta, &event->count);
12978+ atomic64_add_unchecked(delta, &event->count);
12979 atomic64_sub(delta, &hwc->period_left);
12980
12981 return new_raw_count;
12982@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12983 * The hw event starts counting from this event offset,
12984 * mark it to be able to extra future deltas:
12985 */
12986- atomic64_set(&hwc->prev_count, (u64)-left);
12987+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12988
12989 err = checking_wrmsrl(hwc->event_base + idx,
12990 (u64)(-left) & x86_pmu.event_mask);
12991@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12992 break;
12993
12994 callchain_store(entry, frame.return_address);
12995- fp = frame.next_frame;
12996+ fp = (__force const void __user *)frame.next_frame;
12997 }
12998 }
12999
13000diff -urNp linux-2.6.32.48/arch/x86/kernel/crash.c linux-2.6.32.48/arch/x86/kernel/crash.c
13001--- linux-2.6.32.48/arch/x86/kernel/crash.c 2011-11-08 19:02:43.000000000 -0500
13002+++ linux-2.6.32.48/arch/x86/kernel/crash.c 2011-11-15 19:59:42.000000000 -0500
13003@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
13004 regs = args->regs;
13005
13006 #ifdef CONFIG_X86_32
13007- if (!user_mode_vm(regs)) {
13008+ if (!user_mode(regs)) {
13009 crash_fixup_ss_esp(&fixed_regs, regs);
13010 regs = &fixed_regs;
13011 }
13012diff -urNp linux-2.6.32.48/arch/x86/kernel/doublefault_32.c linux-2.6.32.48/arch/x86/kernel/doublefault_32.c
13013--- linux-2.6.32.48/arch/x86/kernel/doublefault_32.c 2011-11-08 19:02:43.000000000 -0500
13014+++ linux-2.6.32.48/arch/x86/kernel/doublefault_32.c 2011-11-15 19:59:42.000000000 -0500
13015@@ -11,7 +11,7 @@
13016
13017 #define DOUBLEFAULT_STACKSIZE (1024)
13018 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13019-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13020+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13021
13022 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13023
13024@@ -21,7 +21,7 @@ static void doublefault_fn(void)
13025 unsigned long gdt, tss;
13026
13027 store_gdt(&gdt_desc);
13028- gdt = gdt_desc.address;
13029+ gdt = (unsigned long)gdt_desc.address;
13030
13031 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13032
13033@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
13034 /* 0x2 bit is always set */
13035 .flags = X86_EFLAGS_SF | 0x2,
13036 .sp = STACK_START,
13037- .es = __USER_DS,
13038+ .es = __KERNEL_DS,
13039 .cs = __KERNEL_CS,
13040 .ss = __KERNEL_DS,
13041- .ds = __USER_DS,
13042+ .ds = __KERNEL_DS,
13043 .fs = __KERNEL_PERCPU,
13044
13045 .__cr3 = __pa_nodebug(swapper_pg_dir),
13046diff -urNp linux-2.6.32.48/arch/x86/kernel/dumpstack_32.c linux-2.6.32.48/arch/x86/kernel/dumpstack_32.c
13047--- linux-2.6.32.48/arch/x86/kernel/dumpstack_32.c 2011-11-08 19:02:43.000000000 -0500
13048+++ linux-2.6.32.48/arch/x86/kernel/dumpstack_32.c 2011-11-15 19:59:42.000000000 -0500
13049@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
13050 #endif
13051
13052 for (;;) {
13053- struct thread_info *context;
13054+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
13055+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
13056
13057- context = (struct thread_info *)
13058- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
13059- bp = print_context_stack(context, stack, bp, ops,
13060- data, NULL, &graph);
13061-
13062- stack = (unsigned long *)context->previous_esp;
13063- if (!stack)
13064+ if (stack_start == task_stack_page(task))
13065 break;
13066+ stack = *(unsigned long **)stack_start;
13067 if (ops->stack(data, "IRQ") < 0)
13068 break;
13069 touch_nmi_watchdog();
13070@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
13071 * When in-kernel, we also print out the stack and code at the
13072 * time of the fault..
13073 */
13074- if (!user_mode_vm(regs)) {
13075+ if (!user_mode(regs)) {
13076 unsigned int code_prologue = code_bytes * 43 / 64;
13077 unsigned int code_len = code_bytes;
13078 unsigned char c;
13079 u8 *ip;
13080+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
13081
13082 printk(KERN_EMERG "Stack:\n");
13083 show_stack_log_lvl(NULL, regs, &regs->sp,
13084@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
13085
13086 printk(KERN_EMERG "Code: ");
13087
13088- ip = (u8 *)regs->ip - code_prologue;
13089+ ip = (u8 *)regs->ip - code_prologue + cs_base;
13090 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
13091 /* try starting at IP */
13092- ip = (u8 *)regs->ip;
13093+ ip = (u8 *)regs->ip + cs_base;
13094 code_len = code_len - code_prologue + 1;
13095 }
13096 for (i = 0; i < code_len; i++, ip++) {
13097@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
13098 printk(" Bad EIP value.");
13099 break;
13100 }
13101- if (ip == (u8 *)regs->ip)
13102+ if (ip == (u8 *)regs->ip + cs_base)
13103 printk("<%02x> ", c);
13104 else
13105 printk("%02x ", c);
13106@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
13107 {
13108 unsigned short ud2;
13109
13110+ ip = ktla_ktva(ip);
13111 if (ip < PAGE_OFFSET)
13112 return 0;
13113 if (probe_kernel_address((unsigned short *)ip, ud2))
13114diff -urNp linux-2.6.32.48/arch/x86/kernel/dumpstack_64.c linux-2.6.32.48/arch/x86/kernel/dumpstack_64.c
13115--- linux-2.6.32.48/arch/x86/kernel/dumpstack_64.c 2011-11-08 19:02:43.000000000 -0500
13116+++ linux-2.6.32.48/arch/x86/kernel/dumpstack_64.c 2011-11-15 19:59:43.000000000 -0500
13117@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
13118 unsigned long *irq_stack_end =
13119 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
13120 unsigned used = 0;
13121- struct thread_info *tinfo;
13122 int graph = 0;
13123+ void *stack_start;
13124
13125 if (!task)
13126 task = current;
13127@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
13128 * current stack address. If the stacks consist of nested
13129 * exceptions
13130 */
13131- tinfo = task_thread_info(task);
13132 for (;;) {
13133 char *id;
13134 unsigned long *estack_end;
13135+
13136 estack_end = in_exception_stack(cpu, (unsigned long)stack,
13137 &used, &id);
13138
13139@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
13140 if (ops->stack(data, id) < 0)
13141 break;
13142
13143- bp = print_context_stack(tinfo, stack, bp, ops,
13144+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
13145 data, estack_end, &graph);
13146 ops->stack(data, "<EOE>");
13147 /*
13148@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
13149 if (stack >= irq_stack && stack < irq_stack_end) {
13150 if (ops->stack(data, "IRQ") < 0)
13151 break;
13152- bp = print_context_stack(tinfo, stack, bp,
13153+ bp = print_context_stack(task, irq_stack, stack, bp,
13154 ops, data, irq_stack_end, &graph);
13155 /*
13156 * We link to the next stack (which would be
13157@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
13158 /*
13159 * This handles the process stack:
13160 */
13161- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
13162+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
13163+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
13164 put_cpu();
13165 }
13166 EXPORT_SYMBOL(dump_trace);
13167diff -urNp linux-2.6.32.48/arch/x86/kernel/dumpstack.c linux-2.6.32.48/arch/x86/kernel/dumpstack.c
13168--- linux-2.6.32.48/arch/x86/kernel/dumpstack.c 2011-11-08 19:02:43.000000000 -0500
13169+++ linux-2.6.32.48/arch/x86/kernel/dumpstack.c 2011-11-15 19:59:43.000000000 -0500
13170@@ -2,6 +2,9 @@
13171 * Copyright (C) 1991, 1992 Linus Torvalds
13172 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13173 */
13174+#ifdef CONFIG_GRKERNSEC_HIDESYM
13175+#define __INCLUDED_BY_HIDESYM 1
13176+#endif
13177 #include <linux/kallsyms.h>
13178 #include <linux/kprobes.h>
13179 #include <linux/uaccess.h>
13180@@ -28,7 +31,7 @@ static int die_counter;
13181
13182 void printk_address(unsigned long address, int reliable)
13183 {
13184- printk(" [<%p>] %s%pS\n", (void *) address,
13185+ printk(" [<%p>] %s%pA\n", (void *) address,
13186 reliable ? "" : "? ", (void *) address);
13187 }
13188
13189@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
13190 static void
13191 print_ftrace_graph_addr(unsigned long addr, void *data,
13192 const struct stacktrace_ops *ops,
13193- struct thread_info *tinfo, int *graph)
13194+ struct task_struct *task, int *graph)
13195 {
13196- struct task_struct *task = tinfo->task;
13197 unsigned long ret_addr;
13198 int index = task->curr_ret_stack;
13199
13200@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
13201 static inline void
13202 print_ftrace_graph_addr(unsigned long addr, void *data,
13203 const struct stacktrace_ops *ops,
13204- struct thread_info *tinfo, int *graph)
13205+ struct task_struct *task, int *graph)
13206 { }
13207 #endif
13208
13209@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
13210 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13211 */
13212
13213-static inline int valid_stack_ptr(struct thread_info *tinfo,
13214- void *p, unsigned int size, void *end)
13215+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13216 {
13217- void *t = tinfo;
13218 if (end) {
13219 if (p < end && p >= (end-THREAD_SIZE))
13220 return 1;
13221@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
13222 }
13223
13224 unsigned long
13225-print_context_stack(struct thread_info *tinfo,
13226+print_context_stack(struct task_struct *task, void *stack_start,
13227 unsigned long *stack, unsigned long bp,
13228 const struct stacktrace_ops *ops, void *data,
13229 unsigned long *end, int *graph)
13230 {
13231 struct stack_frame *frame = (struct stack_frame *)bp;
13232
13233- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13234+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13235 unsigned long addr;
13236
13237 addr = *stack;
13238@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
13239 } else {
13240 ops->address(data, addr, 0);
13241 }
13242- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13243+ print_ftrace_graph_addr(addr, data, ops, task, graph);
13244 }
13245 stack++;
13246 }
13247@@ -180,7 +180,7 @@ void dump_stack(void)
13248 #endif
13249
13250 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13251- current->pid, current->comm, print_tainted(),
13252+ task_pid_nr(current), current->comm, print_tainted(),
13253 init_utsname()->release,
13254 (int)strcspn(init_utsname()->version, " "),
13255 init_utsname()->version);
13256@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
13257 return flags;
13258 }
13259
13260+extern void gr_handle_kernel_exploit(void);
13261+
13262 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13263 {
13264 if (regs && kexec_should_crash(current))
13265@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13266 panic("Fatal exception in interrupt");
13267 if (panic_on_oops)
13268 panic("Fatal exception");
13269- do_exit(signr);
13270+
13271+ gr_handle_kernel_exploit();
13272+
13273+ do_group_exit(signr);
13274 }
13275
13276 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13277@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13278 unsigned long flags = oops_begin();
13279 int sig = SIGSEGV;
13280
13281- if (!user_mode_vm(regs))
13282+ if (!user_mode(regs))
13283 report_bug(regs->ip, regs);
13284
13285 if (__die(str, regs, err))
13286diff -urNp linux-2.6.32.48/arch/x86/kernel/dumpstack.h linux-2.6.32.48/arch/x86/kernel/dumpstack.h
13287--- linux-2.6.32.48/arch/x86/kernel/dumpstack.h 2011-11-08 19:02:43.000000000 -0500
13288+++ linux-2.6.32.48/arch/x86/kernel/dumpstack.h 2011-11-15 19:59:43.000000000 -0500
13289@@ -15,7 +15,7 @@
13290 #endif
13291
13292 extern unsigned long
13293-print_context_stack(struct thread_info *tinfo,
13294+print_context_stack(struct task_struct *task, void *stack_start,
13295 unsigned long *stack, unsigned long bp,
13296 const struct stacktrace_ops *ops, void *data,
13297 unsigned long *end, int *graph);
13298diff -urNp linux-2.6.32.48/arch/x86/kernel/e820.c linux-2.6.32.48/arch/x86/kernel/e820.c
13299--- linux-2.6.32.48/arch/x86/kernel/e820.c 2011-11-08 19:02:43.000000000 -0500
13300+++ linux-2.6.32.48/arch/x86/kernel/e820.c 2011-11-15 19:59:43.000000000 -0500
13301@@ -733,7 +733,7 @@ struct early_res {
13302 };
13303 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13304 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13305- {}
13306+ { 0, 0, {0}, 0 }
13307 };
13308
13309 static int __init find_overlapped_early(u64 start, u64 end)
13310diff -urNp linux-2.6.32.48/arch/x86/kernel/early_printk.c linux-2.6.32.48/arch/x86/kernel/early_printk.c
13311--- linux-2.6.32.48/arch/x86/kernel/early_printk.c 2011-11-08 19:02:43.000000000 -0500
13312+++ linux-2.6.32.48/arch/x86/kernel/early_printk.c 2011-11-15 19:59:43.000000000 -0500
13313@@ -7,6 +7,7 @@
13314 #include <linux/pci_regs.h>
13315 #include <linux/pci_ids.h>
13316 #include <linux/errno.h>
13317+#include <linux/sched.h>
13318 #include <asm/io.h>
13319 #include <asm/processor.h>
13320 #include <asm/fcntl.h>
13321@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13322 int n;
13323 va_list ap;
13324
13325+ pax_track_stack();
13326+
13327 va_start(ap, fmt);
13328 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13329 early_console->write(early_console, buf, n);
13330diff -urNp linux-2.6.32.48/arch/x86/kernel/efi_32.c linux-2.6.32.48/arch/x86/kernel/efi_32.c
13331--- linux-2.6.32.48/arch/x86/kernel/efi_32.c 2011-11-08 19:02:43.000000000 -0500
13332+++ linux-2.6.32.48/arch/x86/kernel/efi_32.c 2011-11-15 19:59:43.000000000 -0500
13333@@ -38,70 +38,56 @@
13334 */
13335
13336 static unsigned long efi_rt_eflags;
13337-static pgd_t efi_bak_pg_dir_pointer[2];
13338+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13339
13340-void efi_call_phys_prelog(void)
13341+void __init efi_call_phys_prelog(void)
13342 {
13343- unsigned long cr4;
13344- unsigned long temp;
13345 struct desc_ptr gdt_descr;
13346
13347- local_irq_save(efi_rt_eflags);
13348+#ifdef CONFIG_PAX_KERNEXEC
13349+ struct desc_struct d;
13350+#endif
13351
13352- /*
13353- * If I don't have PAE, I should just duplicate two entries in page
13354- * directory. If I have PAE, I just need to duplicate one entry in
13355- * page directory.
13356- */
13357- cr4 = read_cr4_safe();
13358+ local_irq_save(efi_rt_eflags);
13359
13360- if (cr4 & X86_CR4_PAE) {
13361- efi_bak_pg_dir_pointer[0].pgd =
13362- swapper_pg_dir[pgd_index(0)].pgd;
13363- swapper_pg_dir[0].pgd =
13364- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13365- } else {
13366- efi_bak_pg_dir_pointer[0].pgd =
13367- swapper_pg_dir[pgd_index(0)].pgd;
13368- efi_bak_pg_dir_pointer[1].pgd =
13369- swapper_pg_dir[pgd_index(0x400000)].pgd;
13370- swapper_pg_dir[pgd_index(0)].pgd =
13371- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13372- temp = PAGE_OFFSET + 0x400000;
13373- swapper_pg_dir[pgd_index(0x400000)].pgd =
13374- swapper_pg_dir[pgd_index(temp)].pgd;
13375- }
13376+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13377+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13378+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13379
13380 /*
13381 * After the lock is released, the original page table is restored.
13382 */
13383 __flush_tlb_all();
13384
13385+#ifdef CONFIG_PAX_KERNEXEC
13386+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
13387+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
13388+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
13389+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
13390+#endif
13391+
13392 gdt_descr.address = __pa(get_cpu_gdt_table(0));
13393 gdt_descr.size = GDT_SIZE - 1;
13394 load_gdt(&gdt_descr);
13395 }
13396
13397-void efi_call_phys_epilog(void)
13398+void __init efi_call_phys_epilog(void)
13399 {
13400- unsigned long cr4;
13401 struct desc_ptr gdt_descr;
13402
13403+#ifdef CONFIG_PAX_KERNEXEC
13404+ struct desc_struct d;
13405+
13406+ memset(&d, 0, sizeof d);
13407+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
13408+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
13409+#endif
13410+
13411 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13412 gdt_descr.size = GDT_SIZE - 1;
13413 load_gdt(&gdt_descr);
13414
13415- cr4 = read_cr4_safe();
13416-
13417- if (cr4 & X86_CR4_PAE) {
13418- swapper_pg_dir[pgd_index(0)].pgd =
13419- efi_bak_pg_dir_pointer[0].pgd;
13420- } else {
13421- swapper_pg_dir[pgd_index(0)].pgd =
13422- efi_bak_pg_dir_pointer[0].pgd;
13423- swapper_pg_dir[pgd_index(0x400000)].pgd =
13424- efi_bak_pg_dir_pointer[1].pgd;
13425- }
13426+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13427
13428 /*
13429 * After the lock is released, the original page table is restored.
13430diff -urNp linux-2.6.32.48/arch/x86/kernel/efi_stub_32.S linux-2.6.32.48/arch/x86/kernel/efi_stub_32.S
13431--- linux-2.6.32.48/arch/x86/kernel/efi_stub_32.S 2011-11-08 19:02:43.000000000 -0500
13432+++ linux-2.6.32.48/arch/x86/kernel/efi_stub_32.S 2011-11-15 19:59:43.000000000 -0500
13433@@ -6,7 +6,9 @@
13434 */
13435
13436 #include <linux/linkage.h>
13437+#include <linux/init.h>
13438 #include <asm/page_types.h>
13439+#include <asm/segment.h>
13440
13441 /*
13442 * efi_call_phys(void *, ...) is a function with variable parameters.
13443@@ -20,7 +22,7 @@
13444 * service functions will comply with gcc calling convention, too.
13445 */
13446
13447-.text
13448+__INIT
13449 ENTRY(efi_call_phys)
13450 /*
13451 * 0. The function can only be called in Linux kernel. So CS has been
13452@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
13453 * The mapping of lower virtual memory has been created in prelog and
13454 * epilog.
13455 */
13456- movl $1f, %edx
13457- subl $__PAGE_OFFSET, %edx
13458- jmp *%edx
13459+ movl $(__KERNEXEC_EFI_DS), %edx
13460+ mov %edx, %ds
13461+ mov %edx, %es
13462+ mov %edx, %ss
13463+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
13464 1:
13465
13466 /*
13467@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
13468 * parameter 2, ..., param n. To make things easy, we save the return
13469 * address of efi_call_phys in a global variable.
13470 */
13471- popl %edx
13472- movl %edx, saved_return_addr
13473- /* get the function pointer into ECX*/
13474- popl %ecx
13475- movl %ecx, efi_rt_function_ptr
13476- movl $2f, %edx
13477- subl $__PAGE_OFFSET, %edx
13478- pushl %edx
13479+ popl (saved_return_addr)
13480+ popl (efi_rt_function_ptr)
13481
13482 /*
13483 * 3. Clear PG bit in %CR0.
13484@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
13485 /*
13486 * 5. Call the physical function.
13487 */
13488- jmp *%ecx
13489+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13490
13491-2:
13492 /*
13493 * 6. After EFI runtime service returns, control will return to
13494 * following instruction. We'd better readjust stack pointer first.
13495@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
13496 movl %cr0, %edx
13497 orl $0x80000000, %edx
13498 movl %edx, %cr0
13499- jmp 1f
13500-1:
13501+
13502 /*
13503 * 8. Now restore the virtual mode from flat mode by
13504 * adding EIP with PAGE_OFFSET.
13505 */
13506- movl $1f, %edx
13507- jmp *%edx
13508+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
13509 1:
13510+ movl $(__KERNEL_DS), %edx
13511+ mov %edx, %ds
13512+ mov %edx, %es
13513+ mov %edx, %ss
13514
13515 /*
13516 * 9. Balance the stack. And because EAX contain the return value,
13517 * we'd better not clobber it.
13518 */
13519- leal efi_rt_function_ptr, %edx
13520- movl (%edx), %ecx
13521- pushl %ecx
13522+ pushl (efi_rt_function_ptr)
13523
13524 /*
13525- * 10. Push the saved return address onto the stack and return.
13526+ * 10. Return to the saved return address.
13527 */
13528- leal saved_return_addr, %edx
13529- movl (%edx), %ecx
13530- pushl %ecx
13531- ret
13532+ jmpl *(saved_return_addr)
13533 ENDPROC(efi_call_phys)
13534 .previous
13535
13536-.data
13537+__INITDATA
13538 saved_return_addr:
13539 .long 0
13540 efi_rt_function_ptr:
13541diff -urNp linux-2.6.32.48/arch/x86/kernel/efi_stub_64.S linux-2.6.32.48/arch/x86/kernel/efi_stub_64.S
13542--- linux-2.6.32.48/arch/x86/kernel/efi_stub_64.S 2011-11-08 19:02:43.000000000 -0500
13543+++ linux-2.6.32.48/arch/x86/kernel/efi_stub_64.S 2011-11-15 19:59:43.000000000 -0500
13544@@ -7,6 +7,7 @@
13545 */
13546
13547 #include <linux/linkage.h>
13548+#include <asm/alternative-asm.h>
13549
13550 #define SAVE_XMM \
13551 mov %rsp, %rax; \
13552@@ -40,6 +41,7 @@ ENTRY(efi_call0)
13553 call *%rdi
13554 addq $32, %rsp
13555 RESTORE_XMM
13556+ pax_force_retaddr
13557 ret
13558 ENDPROC(efi_call0)
13559
13560@@ -50,6 +52,7 @@ ENTRY(efi_call1)
13561 call *%rdi
13562 addq $32, %rsp
13563 RESTORE_XMM
13564+ pax_force_retaddr
13565 ret
13566 ENDPROC(efi_call1)
13567
13568@@ -60,6 +63,7 @@ ENTRY(efi_call2)
13569 call *%rdi
13570 addq $32, %rsp
13571 RESTORE_XMM
13572+ pax_force_retaddr
13573 ret
13574 ENDPROC(efi_call2)
13575
13576@@ -71,6 +75,7 @@ ENTRY(efi_call3)
13577 call *%rdi
13578 addq $32, %rsp
13579 RESTORE_XMM
13580+ pax_force_retaddr
13581 ret
13582 ENDPROC(efi_call3)
13583
13584@@ -83,6 +88,7 @@ ENTRY(efi_call4)
13585 call *%rdi
13586 addq $32, %rsp
13587 RESTORE_XMM
13588+ pax_force_retaddr
13589 ret
13590 ENDPROC(efi_call4)
13591
13592@@ -96,6 +102,7 @@ ENTRY(efi_call5)
13593 call *%rdi
13594 addq $48, %rsp
13595 RESTORE_XMM
13596+ pax_force_retaddr
13597 ret
13598 ENDPROC(efi_call5)
13599
13600@@ -112,5 +119,6 @@ ENTRY(efi_call6)
13601 call *%rdi
13602 addq $48, %rsp
13603 RESTORE_XMM
13604+ pax_force_retaddr
13605 ret
13606 ENDPROC(efi_call6)
13607diff -urNp linux-2.6.32.48/arch/x86/kernel/entry_32.S linux-2.6.32.48/arch/x86/kernel/entry_32.S
13608--- linux-2.6.32.48/arch/x86/kernel/entry_32.S 2011-11-08 19:02:43.000000000 -0500
13609+++ linux-2.6.32.48/arch/x86/kernel/entry_32.S 2011-11-15 19:59:43.000000000 -0500
13610@@ -185,13 +185,146 @@
13611 /*CFI_REL_OFFSET gs, PT_GS*/
13612 .endm
13613 .macro SET_KERNEL_GS reg
13614+
13615+#ifdef CONFIG_CC_STACKPROTECTOR
13616 movl $(__KERNEL_STACK_CANARY), \reg
13617+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13618+ movl $(__USER_DS), \reg
13619+#else
13620+ xorl \reg, \reg
13621+#endif
13622+
13623 movl \reg, %gs
13624 .endm
13625
13626 #endif /* CONFIG_X86_32_LAZY_GS */
13627
13628-.macro SAVE_ALL
13629+.macro pax_enter_kernel
13630+#ifdef CONFIG_PAX_KERNEXEC
13631+ call pax_enter_kernel
13632+#endif
13633+.endm
13634+
13635+.macro pax_exit_kernel
13636+#ifdef CONFIG_PAX_KERNEXEC
13637+ call pax_exit_kernel
13638+#endif
13639+.endm
13640+
13641+#ifdef CONFIG_PAX_KERNEXEC
13642+ENTRY(pax_enter_kernel)
13643+#ifdef CONFIG_PARAVIRT
13644+ pushl %eax
13645+ pushl %ecx
13646+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13647+ mov %eax, %esi
13648+#else
13649+ mov %cr0, %esi
13650+#endif
13651+ bts $16, %esi
13652+ jnc 1f
13653+ mov %cs, %esi
13654+ cmp $__KERNEL_CS, %esi
13655+ jz 3f
13656+ ljmp $__KERNEL_CS, $3f
13657+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13658+2:
13659+#ifdef CONFIG_PARAVIRT
13660+ mov %esi, %eax
13661+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13662+#else
13663+ mov %esi, %cr0
13664+#endif
13665+3:
13666+#ifdef CONFIG_PARAVIRT
13667+ popl %ecx
13668+ popl %eax
13669+#endif
13670+ ret
13671+ENDPROC(pax_enter_kernel)
13672+
13673+ENTRY(pax_exit_kernel)
13674+#ifdef CONFIG_PARAVIRT
13675+ pushl %eax
13676+ pushl %ecx
13677+#endif
13678+ mov %cs, %esi
13679+ cmp $__KERNEXEC_KERNEL_CS, %esi
13680+ jnz 2f
13681+#ifdef CONFIG_PARAVIRT
13682+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13683+ mov %eax, %esi
13684+#else
13685+ mov %cr0, %esi
13686+#endif
13687+ btr $16, %esi
13688+ ljmp $__KERNEL_CS, $1f
13689+1:
13690+#ifdef CONFIG_PARAVIRT
13691+ mov %esi, %eax
13692+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13693+#else
13694+ mov %esi, %cr0
13695+#endif
13696+2:
13697+#ifdef CONFIG_PARAVIRT
13698+ popl %ecx
13699+ popl %eax
13700+#endif
13701+ ret
13702+ENDPROC(pax_exit_kernel)
13703+#endif
13704+
13705+.macro pax_erase_kstack
13706+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13707+ call pax_erase_kstack
13708+#endif
13709+.endm
13710+
13711+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13712+/*
13713+ * ebp: thread_info
13714+ * ecx, edx: can be clobbered
13715+ */
13716+ENTRY(pax_erase_kstack)
13717+ pushl %edi
13718+ pushl %eax
13719+
13720+ mov TI_lowest_stack(%ebp), %edi
13721+ mov $-0xBEEF, %eax
13722+ std
13723+
13724+1: mov %edi, %ecx
13725+ and $THREAD_SIZE_asm - 1, %ecx
13726+ shr $2, %ecx
13727+ repne scasl
13728+ jecxz 2f
13729+
13730+ cmp $2*16, %ecx
13731+ jc 2f
13732+
13733+ mov $2*16, %ecx
13734+ repe scasl
13735+ jecxz 2f
13736+ jne 1b
13737+
13738+2: cld
13739+ mov %esp, %ecx
13740+ sub %edi, %ecx
13741+ shr $2, %ecx
13742+ rep stosl
13743+
13744+ mov TI_task_thread_sp0(%ebp), %edi
13745+ sub $128, %edi
13746+ mov %edi, TI_lowest_stack(%ebp)
13747+
13748+ popl %eax
13749+ popl %edi
13750+ ret
13751+ENDPROC(pax_erase_kstack)
13752+#endif
13753+
13754+.macro __SAVE_ALL _DS
13755 cld
13756 PUSH_GS
13757 pushl %fs
13758@@ -224,7 +357,7 @@
13759 pushl %ebx
13760 CFI_ADJUST_CFA_OFFSET 4
13761 CFI_REL_OFFSET ebx, 0
13762- movl $(__USER_DS), %edx
13763+ movl $\_DS, %edx
13764 movl %edx, %ds
13765 movl %edx, %es
13766 movl $(__KERNEL_PERCPU), %edx
13767@@ -232,6 +365,15 @@
13768 SET_KERNEL_GS %edx
13769 .endm
13770
13771+.macro SAVE_ALL
13772+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13773+ __SAVE_ALL __KERNEL_DS
13774+ pax_enter_kernel
13775+#else
13776+ __SAVE_ALL __USER_DS
13777+#endif
13778+.endm
13779+
13780 .macro RESTORE_INT_REGS
13781 popl %ebx
13782 CFI_ADJUST_CFA_OFFSET -4
13783@@ -352,7 +494,15 @@ check_userspace:
13784 movb PT_CS(%esp), %al
13785 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13786 cmpl $USER_RPL, %eax
13787+
13788+#ifdef CONFIG_PAX_KERNEXEC
13789+ jae resume_userspace
13790+
13791+ PAX_EXIT_KERNEL
13792+ jmp resume_kernel
13793+#else
13794 jb resume_kernel # not returning to v8086 or userspace
13795+#endif
13796
13797 ENTRY(resume_userspace)
13798 LOCKDEP_SYS_EXIT
13799@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13800 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13801 # int/exception return?
13802 jne work_pending
13803- jmp restore_all
13804+ jmp restore_all_pax
13805 END(ret_from_exception)
13806
13807 #ifdef CONFIG_PREEMPT
13808@@ -414,25 +564,36 @@ sysenter_past_esp:
13809 /*CFI_REL_OFFSET cs, 0*/
13810 /*
13811 * Push current_thread_info()->sysenter_return to the stack.
13812- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13813- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13814 */
13815- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13816+ pushl $0
13817 CFI_ADJUST_CFA_OFFSET 4
13818 CFI_REL_OFFSET eip, 0
13819
13820 pushl %eax
13821 CFI_ADJUST_CFA_OFFSET 4
13822 SAVE_ALL
13823+ GET_THREAD_INFO(%ebp)
13824+ movl TI_sysenter_return(%ebp),%ebp
13825+ movl %ebp,PT_EIP(%esp)
13826 ENABLE_INTERRUPTS(CLBR_NONE)
13827
13828 /*
13829 * Load the potential sixth argument from user stack.
13830 * Careful about security.
13831 */
13832+ movl PT_OLDESP(%esp),%ebp
13833+
13834+#ifdef CONFIG_PAX_MEMORY_UDEREF
13835+ mov PT_OLDSS(%esp),%ds
13836+1: movl %ds:(%ebp),%ebp
13837+ push %ss
13838+ pop %ds
13839+#else
13840 cmpl $__PAGE_OFFSET-3,%ebp
13841 jae syscall_fault
13842 1: movl (%ebp),%ebp
13843+#endif
13844+
13845 movl %ebp,PT_EBP(%esp)
13846 .section __ex_table,"a"
13847 .align 4
13848@@ -455,12 +616,24 @@ sysenter_do_call:
13849 testl $_TIF_ALLWORK_MASK, %ecx
13850 jne sysexit_audit
13851 sysenter_exit:
13852+
13853+#ifdef CONFIG_PAX_RANDKSTACK
13854+ pushl_cfi %eax
13855+ movl %esp, %eax
13856+ call pax_randomize_kstack
13857+ popl_cfi %eax
13858+#endif
13859+
13860+ pax_erase_kstack
13861+
13862 /* if something modifies registers it must also disable sysexit */
13863 movl PT_EIP(%esp), %edx
13864 movl PT_OLDESP(%esp), %ecx
13865 xorl %ebp,%ebp
13866 TRACE_IRQS_ON
13867 1: mov PT_FS(%esp), %fs
13868+2: mov PT_DS(%esp), %ds
13869+3: mov PT_ES(%esp), %es
13870 PTGS_TO_GS
13871 ENABLE_INTERRUPTS_SYSEXIT
13872
13873@@ -477,6 +650,9 @@ sysenter_audit:
13874 movl %eax,%edx /* 2nd arg: syscall number */
13875 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13876 call audit_syscall_entry
13877+
13878+ pax_erase_kstack
13879+
13880 pushl %ebx
13881 CFI_ADJUST_CFA_OFFSET 4
13882 movl PT_EAX(%esp),%eax /* reload syscall number */
13883@@ -504,11 +680,17 @@ sysexit_audit:
13884
13885 CFI_ENDPROC
13886 .pushsection .fixup,"ax"
13887-2: movl $0,PT_FS(%esp)
13888+4: movl $0,PT_FS(%esp)
13889+ jmp 1b
13890+5: movl $0,PT_DS(%esp)
13891+ jmp 1b
13892+6: movl $0,PT_ES(%esp)
13893 jmp 1b
13894 .section __ex_table,"a"
13895 .align 4
13896- .long 1b,2b
13897+ .long 1b,4b
13898+ .long 2b,5b
13899+ .long 3b,6b
13900 .popsection
13901 PTGS_TO_GS_EX
13902 ENDPROC(ia32_sysenter_target)
13903@@ -538,6 +720,15 @@ syscall_exit:
13904 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13905 jne syscall_exit_work
13906
13907+restore_all_pax:
13908+
13909+#ifdef CONFIG_PAX_RANDKSTACK
13910+ movl %esp, %eax
13911+ call pax_randomize_kstack
13912+#endif
13913+
13914+ pax_erase_kstack
13915+
13916 restore_all:
13917 TRACE_IRQS_IRET
13918 restore_all_notrace:
13919@@ -602,10 +793,29 @@ ldt_ss:
13920 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13921 mov %dx, %ax /* eax: new kernel esp */
13922 sub %eax, %edx /* offset (low word is 0) */
13923- PER_CPU(gdt_page, %ebx)
13924+#ifdef CONFIG_SMP
13925+ movl PER_CPU_VAR(cpu_number), %ebx
13926+ shll $PAGE_SHIFT_asm, %ebx
13927+ addl $cpu_gdt_table, %ebx
13928+#else
13929+ movl $cpu_gdt_table, %ebx
13930+#endif
13931 shr $16, %edx
13932+
13933+#ifdef CONFIG_PAX_KERNEXEC
13934+ mov %cr0, %esi
13935+ btr $16, %esi
13936+ mov %esi, %cr0
13937+#endif
13938+
13939 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13940 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13941+
13942+#ifdef CONFIG_PAX_KERNEXEC
13943+ bts $16, %esi
13944+ mov %esi, %cr0
13945+#endif
13946+
13947 pushl $__ESPFIX_SS
13948 CFI_ADJUST_CFA_OFFSET 4
13949 push %eax /* new kernel esp */
13950@@ -636,31 +846,25 @@ work_resched:
13951 movl TI_flags(%ebp), %ecx
13952 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13953 # than syscall tracing?
13954- jz restore_all
13955+ jz restore_all_pax
13956 testb $_TIF_NEED_RESCHED, %cl
13957 jnz work_resched
13958
13959 work_notifysig: # deal with pending signals and
13960 # notify-resume requests
13961+ movl %esp, %eax
13962 #ifdef CONFIG_VM86
13963 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13964- movl %esp, %eax
13965- jne work_notifysig_v86 # returning to kernel-space or
13966+ jz 1f # returning to kernel-space or
13967 # vm86-space
13968- xorl %edx, %edx
13969- call do_notify_resume
13970- jmp resume_userspace_sig
13971
13972- ALIGN
13973-work_notifysig_v86:
13974 pushl %ecx # save ti_flags for do_notify_resume
13975 CFI_ADJUST_CFA_OFFSET 4
13976 call save_v86_state # %eax contains pt_regs pointer
13977 popl %ecx
13978 CFI_ADJUST_CFA_OFFSET -4
13979 movl %eax, %esp
13980-#else
13981- movl %esp, %eax
13982+1:
13983 #endif
13984 xorl %edx, %edx
13985 call do_notify_resume
13986@@ -673,6 +877,9 @@ syscall_trace_entry:
13987 movl $-ENOSYS,PT_EAX(%esp)
13988 movl %esp, %eax
13989 call syscall_trace_enter
13990+
13991+ pax_erase_kstack
13992+
13993 /* What it returned is what we'll actually use. */
13994 cmpl $(nr_syscalls), %eax
13995 jnae syscall_call
13996@@ -695,6 +902,10 @@ END(syscall_exit_work)
13997
13998 RING0_INT_FRAME # can't unwind into user space anyway
13999 syscall_fault:
14000+#ifdef CONFIG_PAX_MEMORY_UDEREF
14001+ push %ss
14002+ pop %ds
14003+#endif
14004 GET_THREAD_INFO(%ebp)
14005 movl $-EFAULT,PT_EAX(%esp)
14006 jmp resume_userspace
14007@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
14008 PTREGSCALL(vm86)
14009 PTREGSCALL(vm86old)
14010
14011+ ALIGN;
14012+ENTRY(kernel_execve)
14013+ push %ebp
14014+ sub $PT_OLDSS+4,%esp
14015+ push %edi
14016+ push %ecx
14017+ push %eax
14018+ lea 3*4(%esp),%edi
14019+ mov $PT_OLDSS/4+1,%ecx
14020+ xorl %eax,%eax
14021+ rep stosl
14022+ pop %eax
14023+ pop %ecx
14024+ pop %edi
14025+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14026+ mov %eax,PT_EBX(%esp)
14027+ mov %edx,PT_ECX(%esp)
14028+ mov %ecx,PT_EDX(%esp)
14029+ mov %esp,%eax
14030+ call sys_execve
14031+ GET_THREAD_INFO(%ebp)
14032+ test %eax,%eax
14033+ jz syscall_exit
14034+ add $PT_OLDSS+4,%esp
14035+ pop %ebp
14036+ ret
14037+
14038 .macro FIXUP_ESPFIX_STACK
14039 /*
14040 * Switch back for ESPFIX stack to the normal zerobased stack
14041@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
14042 * normal stack and adjusts ESP with the matching offset.
14043 */
14044 /* fixup the stack */
14045- PER_CPU(gdt_page, %ebx)
14046+#ifdef CONFIG_SMP
14047+ movl PER_CPU_VAR(cpu_number), %ebx
14048+ shll $PAGE_SHIFT_asm, %ebx
14049+ addl $cpu_gdt_table, %ebx
14050+#else
14051+ movl $cpu_gdt_table, %ebx
14052+#endif
14053 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
14054 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
14055 shl $16, %eax
14056@@ -1198,7 +1442,6 @@ return_to_handler:
14057 ret
14058 #endif
14059
14060-.section .rodata,"a"
14061 #include "syscall_table_32.S"
14062
14063 syscall_table_size=(.-sys_call_table)
14064@@ -1255,9 +1498,12 @@ error_code:
14065 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14066 REG_TO_PTGS %ecx
14067 SET_KERNEL_GS %ecx
14068- movl $(__USER_DS), %ecx
14069+ movl $(__KERNEL_DS), %ecx
14070 movl %ecx, %ds
14071 movl %ecx, %es
14072+
14073+ pax_enter_kernel
14074+
14075 TRACE_IRQS_OFF
14076 movl %esp,%eax # pt_regs pointer
14077 call *%edi
14078@@ -1351,6 +1597,9 @@ nmi_stack_correct:
14079 xorl %edx,%edx # zero error code
14080 movl %esp,%eax # pt_regs pointer
14081 call do_nmi
14082+
14083+ pax_exit_kernel
14084+
14085 jmp restore_all_notrace
14086 CFI_ENDPROC
14087
14088@@ -1391,6 +1640,9 @@ nmi_espfix_stack:
14089 FIXUP_ESPFIX_STACK # %eax == %esp
14090 xorl %edx,%edx # zero error code
14091 call do_nmi
14092+
14093+ pax_exit_kernel
14094+
14095 RESTORE_REGS
14096 lss 12+4(%esp), %esp # back to espfix stack
14097 CFI_ADJUST_CFA_OFFSET -24
14098diff -urNp linux-2.6.32.48/arch/x86/kernel/entry_64.S linux-2.6.32.48/arch/x86/kernel/entry_64.S
14099--- linux-2.6.32.48/arch/x86/kernel/entry_64.S 2011-11-08 19:02:43.000000000 -0500
14100+++ linux-2.6.32.48/arch/x86/kernel/entry_64.S 2011-11-15 19:59:43.000000000 -0500
14101@@ -53,6 +53,8 @@
14102 #include <asm/paravirt.h>
14103 #include <asm/ftrace.h>
14104 #include <asm/percpu.h>
14105+#include <asm/pgtable.h>
14106+#include <asm/alternative-asm.h>
14107
14108 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14109 #include <linux/elf-em.h>
14110@@ -64,6 +66,7 @@
14111 #ifdef CONFIG_FUNCTION_TRACER
14112 #ifdef CONFIG_DYNAMIC_FTRACE
14113 ENTRY(mcount)
14114+ pax_force_retaddr
14115 retq
14116 END(mcount)
14117
14118@@ -88,6 +91,7 @@ GLOBAL(ftrace_graph_call)
14119 #endif
14120
14121 GLOBAL(ftrace_stub)
14122+ pax_force_retaddr
14123 retq
14124 END(ftrace_caller)
14125
14126@@ -108,6 +112,7 @@ ENTRY(mcount)
14127 #endif
14128
14129 GLOBAL(ftrace_stub)
14130+ pax_force_retaddr
14131 retq
14132
14133 trace:
14134@@ -117,6 +122,7 @@ trace:
14135 movq 8(%rbp), %rsi
14136 subq $MCOUNT_INSN_SIZE, %rdi
14137
14138+ pax_force_fptr ftrace_trace_function
14139 call *ftrace_trace_function
14140
14141 MCOUNT_RESTORE_FRAME
14142@@ -142,6 +148,7 @@ ENTRY(ftrace_graph_caller)
14143
14144 MCOUNT_RESTORE_FRAME
14145
14146+ pax_force_retaddr
14147 retq
14148 END(ftrace_graph_caller)
14149
14150@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
14151 movq 8(%rsp), %rdx
14152 movq (%rsp), %rax
14153 addq $16, %rsp
14154+ pax_force_retaddr
14155 retq
14156 #endif
14157
14158@@ -174,6 +182,269 @@ ENTRY(native_usergs_sysret64)
14159 ENDPROC(native_usergs_sysret64)
14160 #endif /* CONFIG_PARAVIRT */
14161
14162+ .macro ljmpq sel, off
14163+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
14164+ .byte 0x48; ljmp *1234f(%rip)
14165+ .pushsection .rodata
14166+ .align 16
14167+ 1234: .quad \off; .word \sel
14168+ .popsection
14169+#else
14170+ pushq $\sel
14171+ pushq $\off
14172+ lretq
14173+#endif
14174+ .endm
14175+
14176+ .macro pax_enter_kernel
14177+#ifdef CONFIG_PAX_KERNEXEC
14178+ call pax_enter_kernel
14179+#endif
14180+ .endm
14181+
14182+ .macro pax_exit_kernel
14183+#ifdef CONFIG_PAX_KERNEXEC
14184+ call pax_exit_kernel
14185+#endif
14186+ .endm
14187+
14188+#ifdef CONFIG_PAX_KERNEXEC
14189+ENTRY(pax_enter_kernel)
14190+ pushq %rdi
14191+
14192+#ifdef CONFIG_PARAVIRT
14193+ PV_SAVE_REGS(CLBR_RDI)
14194+#endif
14195+
14196+ GET_CR0_INTO_RDI
14197+ bts $16,%rdi
14198+ jnc 1f
14199+ mov %cs,%edi
14200+ cmp $__KERNEL_CS,%edi
14201+ jz 3f
14202+ ljmpq __KERNEL_CS,3f
14203+1: ljmpq __KERNEXEC_KERNEL_CS,2f
14204+2: SET_RDI_INTO_CR0
14205+3:
14206+
14207+#ifdef CONFIG_PARAVIRT
14208+ PV_RESTORE_REGS(CLBR_RDI)
14209+#endif
14210+
14211+ popq %rdi
14212+ pax_force_retaddr
14213+ retq
14214+ENDPROC(pax_enter_kernel)
14215+
14216+ENTRY(pax_exit_kernel)
14217+ pushq %rdi
14218+
14219+#ifdef CONFIG_PARAVIRT
14220+ PV_SAVE_REGS(CLBR_RDI)
14221+#endif
14222+
14223+ mov %cs,%rdi
14224+ cmp $__KERNEXEC_KERNEL_CS,%edi
14225+ jnz 2f
14226+ GET_CR0_INTO_RDI
14227+ btr $16,%rdi
14228+ ljmpq __KERNEL_CS,1f
14229+1: SET_RDI_INTO_CR0
14230+2:
14231+
14232+#ifdef CONFIG_PARAVIRT
14233+ PV_RESTORE_REGS(CLBR_RDI);
14234+#endif
14235+
14236+ popq %rdi
14237+ pax_force_retaddr
14238+ retq
14239+ENDPROC(pax_exit_kernel)
14240+#endif
14241+
14242+ .macro pax_enter_kernel_user
14243+#ifdef CONFIG_PAX_MEMORY_UDEREF
14244+ call pax_enter_kernel_user
14245+#endif
14246+ .endm
14247+
14248+ .macro pax_exit_kernel_user
14249+#ifdef CONFIG_PAX_MEMORY_UDEREF
14250+ call pax_exit_kernel_user
14251+#endif
14252+#ifdef CONFIG_PAX_RANDKSTACK
14253+ push %rax
14254+ call pax_randomize_kstack
14255+ pop %rax
14256+#endif
14257+ .endm
14258+
14259+#ifdef CONFIG_PAX_MEMORY_UDEREF
14260+ENTRY(pax_enter_kernel_user)
14261+ pushq %rdi
14262+ pushq %rbx
14263+
14264+#ifdef CONFIG_PARAVIRT
14265+ PV_SAVE_REGS(CLBR_RDI)
14266+#endif
14267+
14268+ GET_CR3_INTO_RDI
14269+ mov %rdi,%rbx
14270+ add $__START_KERNEL_map,%rbx
14271+ sub phys_base(%rip),%rbx
14272+
14273+#ifdef CONFIG_PARAVIRT
14274+ pushq %rdi
14275+ cmpl $0, pv_info+PARAVIRT_enabled
14276+ jz 1f
14277+ i = 0
14278+ .rept USER_PGD_PTRS
14279+ mov i*8(%rbx),%rsi
14280+ mov $0,%sil
14281+ lea i*8(%rbx),%rdi
14282+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14283+ i = i + 1
14284+ .endr
14285+ jmp 2f
14286+1:
14287+#endif
14288+
14289+ i = 0
14290+ .rept USER_PGD_PTRS
14291+ movb $0,i*8(%rbx)
14292+ i = i + 1
14293+ .endr
14294+
14295+#ifdef CONFIG_PARAVIRT
14296+2: popq %rdi
14297+#endif
14298+ SET_RDI_INTO_CR3
14299+
14300+#ifdef CONFIG_PAX_KERNEXEC
14301+ GET_CR0_INTO_RDI
14302+ bts $16,%rdi
14303+ SET_RDI_INTO_CR0
14304+#endif
14305+
14306+#ifdef CONFIG_PARAVIRT
14307+ PV_RESTORE_REGS(CLBR_RDI)
14308+#endif
14309+
14310+ popq %rbx
14311+ popq %rdi
14312+ pax_force_retaddr
14313+ retq
14314+ENDPROC(pax_enter_kernel_user)
14315+
14316+ENTRY(pax_exit_kernel_user)
14317+ push %rdi
14318+
14319+#ifdef CONFIG_PARAVIRT
14320+ pushq %rbx
14321+ PV_SAVE_REGS(CLBR_RDI)
14322+#endif
14323+
14324+#ifdef CONFIG_PAX_KERNEXEC
14325+ GET_CR0_INTO_RDI
14326+ btr $16,%rdi
14327+ SET_RDI_INTO_CR0
14328+#endif
14329+
14330+ GET_CR3_INTO_RDI
14331+ add $__START_KERNEL_map,%rdi
14332+ sub phys_base(%rip),%rdi
14333+
14334+#ifdef CONFIG_PARAVIRT
14335+ cmpl $0, pv_info+PARAVIRT_enabled
14336+ jz 1f
14337+ mov %rdi,%rbx
14338+ i = 0
14339+ .rept USER_PGD_PTRS
14340+ mov i*8(%rbx),%rsi
14341+ mov $0x67,%sil
14342+ lea i*8(%rbx),%rdi
14343+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14344+ i = i + 1
14345+ .endr
14346+ jmp 2f
14347+1:
14348+#endif
14349+
14350+ i = 0
14351+ .rept USER_PGD_PTRS
14352+ movb $0x67,i*8(%rdi)
14353+ i = i + 1
14354+ .endr
14355+
14356+#ifdef CONFIG_PARAVIRT
14357+2: PV_RESTORE_REGS(CLBR_RDI)
14358+ popq %rbx
14359+#endif
14360+
14361+ popq %rdi
14362+ pax_force_retaddr
14363+ retq
14364+ENDPROC(pax_exit_kernel_user)
14365+#endif
14366+
14367+.macro pax_erase_kstack
14368+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14369+ call pax_erase_kstack
14370+#endif
14371+.endm
14372+
14373+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14374+/*
14375+ * r10: thread_info
14376+ * rcx, rdx: can be clobbered
14377+ */
14378+ENTRY(pax_erase_kstack)
14379+ pushq %rdi
14380+ pushq %rax
14381+ pushq %r10
14382+
14383+ GET_THREAD_INFO(%r10)
14384+ mov TI_lowest_stack(%r10), %rdi
14385+ mov $-0xBEEF, %rax
14386+ std
14387+
14388+1: mov %edi, %ecx
14389+ and $THREAD_SIZE_asm - 1, %ecx
14390+ shr $3, %ecx
14391+ repne scasq
14392+ jecxz 2f
14393+
14394+ cmp $2*8, %ecx
14395+ jc 2f
14396+
14397+ mov $2*8, %ecx
14398+ repe scasq
14399+ jecxz 2f
14400+ jne 1b
14401+
14402+2: cld
14403+ mov %esp, %ecx
14404+ sub %edi, %ecx
14405+
14406+ cmp $THREAD_SIZE_asm, %rcx
14407+ jb 3f
14408+ ud2
14409+3:
14410+
14411+ shr $3, %ecx
14412+ rep stosq
14413+
14414+ mov TI_task_thread_sp0(%r10), %rdi
14415+ sub $256, %rdi
14416+ mov %rdi, TI_lowest_stack(%r10)
14417+
14418+ popq %r10
14419+ popq %rax
14420+ popq %rdi
14421+ pax_force_retaddr
14422+ ret
14423+ENDPROC(pax_erase_kstack)
14424+#endif
14425
14426 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14427 #ifdef CONFIG_TRACE_IRQFLAGS
14428@@ -317,7 +588,7 @@ ENTRY(save_args)
14429 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14430 movq_cfi rbp, 8 /* push %rbp */
14431 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14432- testl $3, CS(%rdi)
14433+ testb $3, CS(%rdi)
14434 je 1f
14435 SWAPGS
14436 /*
14437@@ -337,6 +608,7 @@ ENTRY(save_args)
14438 * We entered an interrupt context - irqs are off:
14439 */
14440 2: TRACE_IRQS_OFF
14441+ pax_force_retaddr
14442 ret
14443 CFI_ENDPROC
14444 END(save_args)
14445@@ -352,6 +624,7 @@ ENTRY(save_rest)
14446 movq_cfi r15, R15+16
14447 movq %r11, 8(%rsp) /* return address */
14448 FIXUP_TOP_OF_STACK %r11, 16
14449+ pax_force_retaddr
14450 ret
14451 CFI_ENDPROC
14452 END(save_rest)
14453@@ -383,7 +656,8 @@ ENTRY(save_paranoid)
14454 js 1f /* negative -> in kernel */
14455 SWAPGS
14456 xorl %ebx,%ebx
14457-1: ret
14458+1: pax_force_retaddr
14459+ ret
14460 CFI_ENDPROC
14461 END(save_paranoid)
14462 .popsection
14463@@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
14464
14465 RESTORE_REST
14466
14467- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14468+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14469 je int_ret_from_sys_call
14470
14471 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14472@@ -455,7 +729,7 @@ END(ret_from_fork)
14473 ENTRY(system_call)
14474 CFI_STARTPROC simple
14475 CFI_SIGNAL_FRAME
14476- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14477+ CFI_DEF_CFA rsp,0
14478 CFI_REGISTER rip,rcx
14479 /*CFI_REGISTER rflags,r11*/
14480 SWAPGS_UNSAFE_STACK
14481@@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
14482
14483 movq %rsp,PER_CPU_VAR(old_rsp)
14484 movq PER_CPU_VAR(kernel_stack),%rsp
14485+ pax_enter_kernel_user
14486 /*
14487 * No need to follow this irqs off/on section - it's straight
14488 * and short:
14489 */
14490 ENABLE_INTERRUPTS(CLBR_NONE)
14491- SAVE_ARGS 8,1
14492+ SAVE_ARGS 8*6,1
14493 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14494 movq %rcx,RIP-ARGOFFSET(%rsp)
14495 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14496@@ -502,6 +777,8 @@ sysret_check:
14497 andl %edi,%edx
14498 jnz sysret_careful
14499 CFI_REMEMBER_STATE
14500+ pax_exit_kernel_user
14501+ pax_erase_kstack
14502 /*
14503 * sysretq will re-enable interrupts:
14504 */
14505@@ -562,6 +839,9 @@ auditsys:
14506 movq %rax,%rsi /* 2nd arg: syscall number */
14507 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14508 call audit_syscall_entry
14509+
14510+ pax_erase_kstack
14511+
14512 LOAD_ARGS 0 /* reload call-clobbered registers */
14513 jmp system_call_fastpath
14514
14515@@ -592,6 +872,9 @@ tracesys:
14516 FIXUP_TOP_OF_STACK %rdi
14517 movq %rsp,%rdi
14518 call syscall_trace_enter
14519+
14520+ pax_erase_kstack
14521+
14522 /*
14523 * Reload arg registers from stack in case ptrace changed them.
14524 * We don't reload %rax because syscall_trace_enter() returned
14525@@ -613,7 +896,7 @@ tracesys:
14526 GLOBAL(int_ret_from_sys_call)
14527 DISABLE_INTERRUPTS(CLBR_NONE)
14528 TRACE_IRQS_OFF
14529- testl $3,CS-ARGOFFSET(%rsp)
14530+ testb $3,CS-ARGOFFSET(%rsp)
14531 je retint_restore_args
14532 movl $_TIF_ALLWORK_MASK,%edi
14533 /* edi: mask to check */
14534@@ -708,6 +991,7 @@ ENTRY(ptregscall_common)
14535 movq_cfi_restore R12+8, r12
14536 movq_cfi_restore RBP+8, rbp
14537 movq_cfi_restore RBX+8, rbx
14538+ pax_force_retaddr
14539 ret $REST_SKIP /* pop extended registers */
14540 CFI_ENDPROC
14541 END(ptregscall_common)
14542@@ -800,6 +1084,16 @@ END(interrupt)
14543 CFI_ADJUST_CFA_OFFSET 10*8
14544 call save_args
14545 PARTIAL_FRAME 0
14546+#ifdef CONFIG_PAX_MEMORY_UDEREF
14547+ testb $3, CS(%rdi)
14548+ jnz 1f
14549+ pax_enter_kernel
14550+ jmp 2f
14551+1: pax_enter_kernel_user
14552+2:
14553+#else
14554+ pax_enter_kernel
14555+#endif
14556 call \func
14557 .endm
14558
14559@@ -822,7 +1116,7 @@ ret_from_intr:
14560 CFI_ADJUST_CFA_OFFSET -8
14561 exit_intr:
14562 GET_THREAD_INFO(%rcx)
14563- testl $3,CS-ARGOFFSET(%rsp)
14564+ testb $3,CS-ARGOFFSET(%rsp)
14565 je retint_kernel
14566
14567 /* Interrupt came from user space */
14568@@ -844,12 +1138,16 @@ retint_swapgs: /* return to user-space
14569 * The iretq could re-enable interrupts:
14570 */
14571 DISABLE_INTERRUPTS(CLBR_ANY)
14572+ pax_exit_kernel_user
14573+ pax_erase_kstack
14574 TRACE_IRQS_IRETQ
14575 SWAPGS
14576 jmp restore_args
14577
14578 retint_restore_args: /* return to kernel space */
14579 DISABLE_INTERRUPTS(CLBR_ANY)
14580+ pax_exit_kernel
14581+ pax_force_retaddr RIP-ARGOFFSET
14582 /*
14583 * The iretq could re-enable interrupts:
14584 */
14585@@ -1032,6 +1330,16 @@ ENTRY(\sym)
14586 CFI_ADJUST_CFA_OFFSET 15*8
14587 call error_entry
14588 DEFAULT_FRAME 0
14589+#ifdef CONFIG_PAX_MEMORY_UDEREF
14590+ testb $3, CS(%rsp)
14591+ jnz 1f
14592+ pax_enter_kernel
14593+ jmp 2f
14594+1: pax_enter_kernel_user
14595+2:
14596+#else
14597+ pax_enter_kernel
14598+#endif
14599 movq %rsp,%rdi /* pt_regs pointer */
14600 xorl %esi,%esi /* no error code */
14601 call \do_sym
14602@@ -1049,6 +1357,16 @@ ENTRY(\sym)
14603 subq $15*8, %rsp
14604 call save_paranoid
14605 TRACE_IRQS_OFF
14606+#ifdef CONFIG_PAX_MEMORY_UDEREF
14607+ testb $3, CS(%rsp)
14608+ jnz 1f
14609+ pax_enter_kernel
14610+ jmp 2f
14611+1: pax_enter_kernel_user
14612+2:
14613+#else
14614+ pax_enter_kernel
14615+#endif
14616 movq %rsp,%rdi /* pt_regs pointer */
14617 xorl %esi,%esi /* no error code */
14618 call \do_sym
14619@@ -1066,9 +1384,24 @@ ENTRY(\sym)
14620 subq $15*8, %rsp
14621 call save_paranoid
14622 TRACE_IRQS_OFF
14623+#ifdef CONFIG_PAX_MEMORY_UDEREF
14624+ testb $3, CS(%rsp)
14625+ jnz 1f
14626+ pax_enter_kernel
14627+ jmp 2f
14628+1: pax_enter_kernel_user
14629+2:
14630+#else
14631+ pax_enter_kernel
14632+#endif
14633 movq %rsp,%rdi /* pt_regs pointer */
14634 xorl %esi,%esi /* no error code */
14635- PER_CPU(init_tss, %rbp)
14636+#ifdef CONFIG_SMP
14637+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14638+ lea init_tss(%rbp), %rbp
14639+#else
14640+ lea init_tss(%rip), %rbp
14641+#endif
14642 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14643 call \do_sym
14644 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14645@@ -1085,6 +1418,16 @@ ENTRY(\sym)
14646 CFI_ADJUST_CFA_OFFSET 15*8
14647 call error_entry
14648 DEFAULT_FRAME 0
14649+#ifdef CONFIG_PAX_MEMORY_UDEREF
14650+ testb $3, CS(%rsp)
14651+ jnz 1f
14652+ pax_enter_kernel
14653+ jmp 2f
14654+1: pax_enter_kernel_user
14655+2:
14656+#else
14657+ pax_enter_kernel
14658+#endif
14659 movq %rsp,%rdi /* pt_regs pointer */
14660 movq ORIG_RAX(%rsp),%rsi /* get error code */
14661 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14662@@ -1104,6 +1447,16 @@ ENTRY(\sym)
14663 call save_paranoid
14664 DEFAULT_FRAME 0
14665 TRACE_IRQS_OFF
14666+#ifdef CONFIG_PAX_MEMORY_UDEREF
14667+ testb $3, CS(%rsp)
14668+ jnz 1f
14669+ pax_enter_kernel
14670+ jmp 2f
14671+1: pax_enter_kernel_user
14672+2:
14673+#else
14674+ pax_enter_kernel
14675+#endif
14676 movq %rsp,%rdi /* pt_regs pointer */
14677 movq ORIG_RAX(%rsp),%rsi /* get error code */
14678 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14679@@ -1141,6 +1494,7 @@ gs_change:
14680 SWAPGS
14681 popf
14682 CFI_ADJUST_CFA_OFFSET -8
14683+ pax_force_retaddr
14684 ret
14685 CFI_ENDPROC
14686 END(native_load_gs_index)
14687@@ -1195,6 +1549,7 @@ ENTRY(kernel_thread)
14688 */
14689 RESTORE_ALL
14690 UNFAKE_STACK_FRAME
14691+ pax_force_retaddr
14692 ret
14693 CFI_ENDPROC
14694 END(kernel_thread)
14695@@ -1208,6 +1563,7 @@ ENTRY(child_rip)
14696 */
14697 movq %rdi, %rax
14698 movq %rsi, %rdi
14699+ pax_force_fptr %rax
14700 call *%rax
14701 # exit
14702 mov %eax, %edi
14703@@ -1243,6 +1599,7 @@ ENTRY(kernel_execve)
14704 je int_ret_from_sys_call
14705 RESTORE_ARGS
14706 UNFAKE_STACK_FRAME
14707+ pax_force_retaddr
14708 ret
14709 CFI_ENDPROC
14710 END(kernel_execve)
14711@@ -1263,6 +1620,7 @@ ENTRY(call_softirq)
14712 CFI_DEF_CFA_REGISTER rsp
14713 CFI_ADJUST_CFA_OFFSET -8
14714 decl PER_CPU_VAR(irq_count)
14715+ pax_force_retaddr
14716 ret
14717 CFI_ENDPROC
14718 END(call_softirq)
14719@@ -1405,16 +1763,31 @@ ENTRY(paranoid_exit)
14720 TRACE_IRQS_OFF
14721 testl %ebx,%ebx /* swapgs needed? */
14722 jnz paranoid_restore
14723- testl $3,CS(%rsp)
14724+ testb $3,CS(%rsp)
14725 jnz paranoid_userspace
14726+#ifdef CONFIG_PAX_MEMORY_UDEREF
14727+ pax_exit_kernel
14728+ TRACE_IRQS_IRETQ 0
14729+ SWAPGS_UNSAFE_STACK
14730+ RESTORE_ALL 8
14731+ pax_force_retaddr
14732+ jmp irq_return
14733+#endif
14734 paranoid_swapgs:
14735+#ifdef CONFIG_PAX_MEMORY_UDEREF
14736+ pax_exit_kernel_user
14737+#else
14738+ pax_exit_kernel
14739+#endif
14740 TRACE_IRQS_IRETQ 0
14741 SWAPGS_UNSAFE_STACK
14742 RESTORE_ALL 8
14743 jmp irq_return
14744 paranoid_restore:
14745+ pax_exit_kernel
14746 TRACE_IRQS_IRETQ 0
14747 RESTORE_ALL 8
14748+ pax_force_retaddr
14749 jmp irq_return
14750 paranoid_userspace:
14751 GET_THREAD_INFO(%rcx)
14752@@ -1470,12 +1843,13 @@ ENTRY(error_entry)
14753 movq_cfi r14, R14+8
14754 movq_cfi r15, R15+8
14755 xorl %ebx,%ebx
14756- testl $3,CS+8(%rsp)
14757+ testb $3,CS+8(%rsp)
14758 je error_kernelspace
14759 error_swapgs:
14760 SWAPGS
14761 error_sti:
14762 TRACE_IRQS_OFF
14763+ pax_force_retaddr
14764 ret
14765 CFI_ENDPROC
14766
14767@@ -1529,6 +1903,16 @@ ENTRY(nmi)
14768 CFI_ADJUST_CFA_OFFSET 15*8
14769 call save_paranoid
14770 DEFAULT_FRAME 0
14771+#ifdef CONFIG_PAX_MEMORY_UDEREF
14772+ testb $3, CS(%rsp)
14773+ jnz 1f
14774+ pax_enter_kernel
14775+ jmp 2f
14776+1: pax_enter_kernel_user
14777+2:
14778+#else
14779+ pax_enter_kernel
14780+#endif
14781 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14782 movq %rsp,%rdi
14783 movq $-1,%rsi
14784@@ -1539,12 +1923,28 @@ ENTRY(nmi)
14785 DISABLE_INTERRUPTS(CLBR_NONE)
14786 testl %ebx,%ebx /* swapgs needed? */
14787 jnz nmi_restore
14788- testl $3,CS(%rsp)
14789+ testb $3,CS(%rsp)
14790 jnz nmi_userspace
14791+#ifdef CONFIG_PAX_MEMORY_UDEREF
14792+ pax_exit_kernel
14793+ SWAPGS_UNSAFE_STACK
14794+ RESTORE_ALL 8
14795+ pax_force_retaddr
14796+ jmp irq_return
14797+#endif
14798 nmi_swapgs:
14799+#ifdef CONFIG_PAX_MEMORY_UDEREF
14800+ pax_exit_kernel_user
14801+#else
14802+ pax_exit_kernel
14803+#endif
14804 SWAPGS_UNSAFE_STACK
14805+ RESTORE_ALL 8
14806+ jmp irq_return
14807 nmi_restore:
14808+ pax_exit_kernel
14809 RESTORE_ALL 8
14810+ pax_force_retaddr
14811 jmp irq_return
14812 nmi_userspace:
14813 GET_THREAD_INFO(%rcx)
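
Note on pax_erase_kstack above: the routine scans downward from the lowest stack address the task has touched, looks for a run of consecutive words holding the -0xBEEF poison pattern (the still-clean part of the stack), then re-fills everything between that boundary and the current stack pointer so that data from old, deep call chains cannot leak later. A greatly simplified C model of the same idea, using a word-indexed toy stack; the toy settles for two consecutive poison words where the assembly scans for a longer run, and all names are made up:

#include <stdint.h>
#include <stdio.h>

#define POISON  0xFFFFFFFFFFFF4111ULL   /* -0xBEEF as a 64-bit pattern */
#define NWORDS  64                      /* toy kernel stack, in 8-byte words */

static uint64_t stack[NWORDS];

/* Find where the untouched (still poisoned) region ends and re-poison
 * everything from there up to, but not including, the currently live
 * words at and above the stack pointer. */
static void erase_kstack(size_t lowest_used, size_t sp)
{
    size_t i = lowest_used;

    /* walk toward lower addresses until two consecutive poison words */
    while (i >= 2 && !(stack[i - 1] == POISON && stack[i - 2] == POISON))
        i--;

    for (size_t j = i; j < sp; j++)     /* sp = index of first live word */
        stack[j] = POISON;
}

int main(void)
{
    for (size_t i = 0; i < NWORDS; i++)
        stack[i] = POISON;

    /* a deep call chain dirtied words 10..63; the task has since returned
     * and its stack pointer now sits at word 40 */
    for (size_t i = 10; i < NWORDS; i++)
        stack[i] = i;

    erase_kstack(10, 40);

    printf("words 10..39 re-poisoned, live frame at word 40 and above kept\n");
    return 0;
}
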
14814diff -urNp linux-2.6.32.48/arch/x86/kernel/ftrace.c linux-2.6.32.48/arch/x86/kernel/ftrace.c
14815--- linux-2.6.32.48/arch/x86/kernel/ftrace.c 2011-11-08 19:02:43.000000000 -0500
14816+++ linux-2.6.32.48/arch/x86/kernel/ftrace.c 2011-11-15 19:59:43.000000000 -0500
14817@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14818 static void *mod_code_newcode; /* holds the text to write to the IP */
14819
14820 static unsigned nmi_wait_count;
14821-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14822+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14823
14824 int ftrace_arch_read_dyn_info(char *buf, int size)
14825 {
14826@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14827
14828 r = snprintf(buf, size, "%u %u",
14829 nmi_wait_count,
14830- atomic_read(&nmi_update_count));
14831+ atomic_read_unchecked(&nmi_update_count));
14832 return r;
14833 }
14834
14835@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14836 {
14837 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14838 smp_rmb();
14839+ pax_open_kernel();
14840 ftrace_mod_code();
14841- atomic_inc(&nmi_update_count);
14842+ pax_close_kernel();
14843+ atomic_inc_unchecked(&nmi_update_count);
14844 }
14845 /* Must have previous changes seen before executions */
14846 smp_mb();
14847@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14848
14849
14850
14851-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14852+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14853
14854 static unsigned char *ftrace_nop_replace(void)
14855 {
14856@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14857 {
14858 unsigned char replaced[MCOUNT_INSN_SIZE];
14859
14860+ ip = ktla_ktva(ip);
14861+
14862 /*
14863 * Note: Due to modules and __init, code can
14864 * disappear and change, we need to protect against faulting
14865@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14866 unsigned char old[MCOUNT_INSN_SIZE], *new;
14867 int ret;
14868
14869- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14870+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14871 new = ftrace_call_replace(ip, (unsigned long)func);
14872 ret = ftrace_modify_code(ip, old, new);
14873
14874@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14875 switch (faulted) {
14876 case 0:
14877 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14878- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14879+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14880 break;
14881 case 1:
14882 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14883- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14884+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14885 break;
14886 case 2:
14887 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14888- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14889+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14890 break;
14891 }
14892
14893@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14894 {
14895 unsigned char code[MCOUNT_INSN_SIZE];
14896
14897+ ip = ktla_ktva(ip);
14898+
14899 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14900 return -EFAULT;
14901
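
Note on the ftrace.c changes above: runtime code patching is bracketed with pax_open_kernel()/pax_close_kernel() and addresses are translated through ktla_ktva(), because under KERNEXEC the kernel text mapping is read-only. A user-space analogue of that open/patch/close pattern, built on plain POSIX mprotect(); the 0f 1f 44 00 00 bytes are the 5-byte NOP mentioned in the hunk, everything else is illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    unsigned char *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    memset(buf, 0x90, 16);                      /* pretend these are old NOPs */
    mprotect(buf, pagesz, PROT_READ);           /* "kernel text": read-only */

    /* pax_open_kernel() analogue: make the text writable for the patch */
    mprotect(buf, pagesz, PROT_READ | PROT_WRITE);
    memcpy(buf, "\x0f\x1f\x44\x00\x00", 5);     /* write the 5-byte NOP */
    /* pax_close_kernel() analogue: lock it down again */
    mprotect(buf, pagesz, PROT_READ);

    printf("first byte after patching: 0x%02x\n", buf[0]);
    munmap(buf, pagesz);
    return 0;
}
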
14902diff -urNp linux-2.6.32.48/arch/x86/kernel/head32.c linux-2.6.32.48/arch/x86/kernel/head32.c
14903--- linux-2.6.32.48/arch/x86/kernel/head32.c 2011-11-08 19:02:43.000000000 -0500
14904+++ linux-2.6.32.48/arch/x86/kernel/head32.c 2011-11-15 19:59:43.000000000 -0500
14905@@ -16,6 +16,7 @@
14906 #include <asm/apic.h>
14907 #include <asm/io_apic.h>
14908 #include <asm/bios_ebda.h>
14909+#include <asm/boot.h>
14910
14911 static void __init i386_default_early_setup(void)
14912 {
14913@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14914 {
14915 reserve_trampoline_memory();
14916
14917- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14918+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14919
14920 #ifdef CONFIG_BLK_DEV_INITRD
14921 /* Reserve INITRD */
14922diff -urNp linux-2.6.32.48/arch/x86/kernel/head_32.S linux-2.6.32.48/arch/x86/kernel/head_32.S
14923--- linux-2.6.32.48/arch/x86/kernel/head_32.S 2011-11-08 19:02:43.000000000 -0500
14924+++ linux-2.6.32.48/arch/x86/kernel/head_32.S 2011-11-15 19:59:43.000000000 -0500
14925@@ -19,10 +19,17 @@
14926 #include <asm/setup.h>
14927 #include <asm/processor-flags.h>
14928 #include <asm/percpu.h>
14929+#include <asm/msr-index.h>
14930
14931 /* Physical address */
14932 #define pa(X) ((X) - __PAGE_OFFSET)
14933
14934+#ifdef CONFIG_PAX_KERNEXEC
14935+#define ta(X) (X)
14936+#else
14937+#define ta(X) ((X) - __PAGE_OFFSET)
14938+#endif
14939+
14940 /*
14941 * References to members of the new_cpu_data structure.
14942 */
14943@@ -52,11 +59,7 @@
14944 * and small than max_low_pfn, otherwise will waste some page table entries
14945 */
14946
14947-#if PTRS_PER_PMD > 1
14948-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14949-#else
14950-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14951-#endif
14952+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14953
14954 /* Enough space to fit pagetables for the low memory linear map */
14955 MAPPING_BEYOND_END = \
14956@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14957 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14958
14959 /*
14960+ * Real beginning of normal "text" segment
14961+ */
14962+ENTRY(stext)
14963+ENTRY(_stext)
14964+
14965+/*
14966 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14967 * %esi points to the real-mode code as a 32-bit pointer.
14968 * CS and DS must be 4 GB flat segments, but we don't depend on
14969@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14970 * can.
14971 */
14972 __HEAD
14973+
14974+#ifdef CONFIG_PAX_KERNEXEC
14975+ jmp startup_32
14976+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14977+.fill PAGE_SIZE-5,1,0xcc
14978+#endif
14979+
14980 ENTRY(startup_32)
14981+ movl pa(stack_start),%ecx
14982+
14983 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14984 us to not reload segments */
14985 testb $(1<<6), BP_loadflags(%esi)
14986@@ -95,7 +113,60 @@ ENTRY(startup_32)
14987 movl %eax,%es
14988 movl %eax,%fs
14989 movl %eax,%gs
14990+ movl %eax,%ss
14991 2:
14992+ leal -__PAGE_OFFSET(%ecx),%esp
14993+
14994+#ifdef CONFIG_SMP
14995+ movl $pa(cpu_gdt_table),%edi
14996+ movl $__per_cpu_load,%eax
14997+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14998+ rorl $16,%eax
14999+ movb %al,__KERNEL_PERCPU + 4(%edi)
15000+ movb %ah,__KERNEL_PERCPU + 7(%edi)
15001+ movl $__per_cpu_end - 1,%eax
15002+ subl $__per_cpu_start,%eax
15003+ movw %ax,__KERNEL_PERCPU + 0(%edi)
15004+#endif
15005+
15006+#ifdef CONFIG_PAX_MEMORY_UDEREF
15007+ movl $NR_CPUS,%ecx
15008+ movl $pa(cpu_gdt_table),%edi
15009+1:
15010+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
15011+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
15012+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
15013+ addl $PAGE_SIZE_asm,%edi
15014+ loop 1b
15015+#endif
15016+
15017+#ifdef CONFIG_PAX_KERNEXEC
15018+ movl $pa(boot_gdt),%edi
15019+ movl $__LOAD_PHYSICAL_ADDR,%eax
15020+ movw %ax,__BOOT_CS + 2(%edi)
15021+ rorl $16,%eax
15022+ movb %al,__BOOT_CS + 4(%edi)
15023+ movb %ah,__BOOT_CS + 7(%edi)
15024+ rorl $16,%eax
15025+
15026+ ljmp $(__BOOT_CS),$1f
15027+1:
15028+
15029+ movl $NR_CPUS,%ecx
15030+ movl $pa(cpu_gdt_table),%edi
15031+ addl $__PAGE_OFFSET,%eax
15032+1:
15033+ movw %ax,__KERNEL_CS + 2(%edi)
15034+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
15035+ rorl $16,%eax
15036+ movb %al,__KERNEL_CS + 4(%edi)
15037+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
15038+ movb %ah,__KERNEL_CS + 7(%edi)
15039+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
15040+ rorl $16,%eax
15041+ addl $PAGE_SIZE_asm,%edi
15042+ loop 1b
15043+#endif
15044
15045 /*
15046 * Clear BSS first so that there are no surprises...
15047@@ -140,9 +211,7 @@ ENTRY(startup_32)
15048 cmpl $num_subarch_entries, %eax
15049 jae bad_subarch
15050
15051- movl pa(subarch_entries)(,%eax,4), %eax
15052- subl $__PAGE_OFFSET, %eax
15053- jmp *%eax
15054+ jmp *pa(subarch_entries)(,%eax,4)
15055
15056 bad_subarch:
15057 WEAK(lguest_entry)
15058@@ -154,10 +223,10 @@ WEAK(xen_entry)
15059 __INITDATA
15060
15061 subarch_entries:
15062- .long default_entry /* normal x86/PC */
15063- .long lguest_entry /* lguest hypervisor */
15064- .long xen_entry /* Xen hypervisor */
15065- .long default_entry /* Moorestown MID */
15066+ .long ta(default_entry) /* normal x86/PC */
15067+ .long ta(lguest_entry) /* lguest hypervisor */
15068+ .long ta(xen_entry) /* Xen hypervisor */
15069+ .long ta(default_entry) /* Moorestown MID */
15070 num_subarch_entries = (. - subarch_entries) / 4
15071 .previous
15072 #endif /* CONFIG_PARAVIRT */
15073@@ -218,8 +287,11 @@ default_entry:
15074 movl %eax, pa(max_pfn_mapped)
15075
15076 /* Do early initialization of the fixmap area */
15077- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
15078- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
15079+#ifdef CONFIG_COMPAT_VDSO
15080+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
15081+#else
15082+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
15083+#endif
15084 #else /* Not PAE */
15085
15086 page_pde_offset = (__PAGE_OFFSET >> 20);
15087@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
15088 movl %eax, pa(max_pfn_mapped)
15089
15090 /* Do early initialization of the fixmap area */
15091- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
15092- movl %eax,pa(swapper_pg_dir+0xffc)
15093+#ifdef CONFIG_COMPAT_VDSO
15094+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
15095+#else
15096+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
15097+#endif
15098 #endif
15099 jmp 3f
15100 /*
15101@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
15102 movl %eax,%es
15103 movl %eax,%fs
15104 movl %eax,%gs
15105+ movl pa(stack_start),%ecx
15106+ movl %eax,%ss
15107+ leal -__PAGE_OFFSET(%ecx),%esp
15108 #endif /* CONFIG_SMP */
15109 3:
15110
15111@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
15112 orl %edx,%eax
15113 movl %eax,%cr4
15114
15115+#ifdef CONFIG_X86_PAE
15116 btl $5, %eax # check if PAE is enabled
15117 jnc 6f
15118
15119@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
15120 cpuid
15121 cmpl $0x80000000, %eax
15122 jbe 6f
15123+
15124+ /* Clear bogus XD_DISABLE bits */
15125+ call verify_cpu
15126+
15127 mov $0x80000001, %eax
15128 cpuid
15129 /* Execute Disable bit supported? */
15130@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
15131 jnc 6f
15132
15133 /* Setup EFER (Extended Feature Enable Register) */
15134- movl $0xc0000080, %ecx
15135+ movl $MSR_EFER, %ecx
15136 rdmsr
15137
15138 btsl $11, %eax
15139 /* Make changes effective */
15140 wrmsr
15141
15142+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
15143+ movl $1,pa(nx_enabled)
15144+#endif
15145+
15146 6:
15147
15148 /*
15149@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
15150 movl %eax,%cr0 /* ..and set paging (PG) bit */
15151 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
15152 1:
15153- /* Set up the stack pointer */
15154- lss stack_start,%esp
15155+ /* Shift the stack pointer to a virtual address */
15156+ addl $__PAGE_OFFSET, %esp
15157
15158 /*
15159 * Initialize eflags. Some BIOS's leave bits like NT set. This would
15160@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
15161
15162 #ifdef CONFIG_SMP
15163 cmpb $0, ready
15164- jz 1f /* Initial CPU cleans BSS */
15165- jmp checkCPUtype
15166-1:
15167+ jnz checkCPUtype
15168 #endif /* CONFIG_SMP */
15169
15170 /*
15171@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
15172 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
15173 movl %eax,%ss # after changing gdt.
15174
15175- movl $(__USER_DS),%eax # DS/ES contains default USER segment
15176+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
15177 movl %eax,%ds
15178 movl %eax,%es
15179
15180@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
15181 */
15182 cmpb $0,ready
15183 jne 1f
15184- movl $per_cpu__gdt_page,%eax
15185+ movl $cpu_gdt_table,%eax
15186 movl $per_cpu__stack_canary,%ecx
15187+#ifdef CONFIG_SMP
15188+ addl $__per_cpu_load,%ecx
15189+#endif
15190 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15191 shrl $16, %ecx
15192 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15193 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15194 1:
15195-#endif
15196 movl $(__KERNEL_STACK_CANARY),%eax
15197+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15198+ movl $(__USER_DS),%eax
15199+#else
15200+ xorl %eax,%eax
15201+#endif
15202 movl %eax,%gs
15203
15204 xorl %eax,%eax # Clear LDT
15205@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
15206
15207 cld # gcc2 wants the direction flag cleared at all times
15208 pushl $0 # fake return address for unwinder
15209-#ifdef CONFIG_SMP
15210- movb ready, %cl
15211 movb $1, ready
15212- cmpb $0,%cl # the first CPU calls start_kernel
15213- je 1f
15214- movl (stack_start), %esp
15215-1:
15216-#endif /* CONFIG_SMP */
15217 jmp *(initial_code)
15218
15219 /*
15220@@ -546,22 +631,22 @@ early_page_fault:
15221 jmp early_fault
15222
15223 early_fault:
15224- cld
15225 #ifdef CONFIG_PRINTK
15226+ cmpl $1,%ss:early_recursion_flag
15227+ je hlt_loop
15228+ incl %ss:early_recursion_flag
15229+ cld
15230 pusha
15231 movl $(__KERNEL_DS),%eax
15232 movl %eax,%ds
15233 movl %eax,%es
15234- cmpl $2,early_recursion_flag
15235- je hlt_loop
15236- incl early_recursion_flag
15237 movl %cr2,%eax
15238 pushl %eax
15239 pushl %edx /* trapno */
15240 pushl $fault_msg
15241 call printk
15242+; call dump_stack
15243 #endif
15244- call dump_stack
15245 hlt_loop:
15246 hlt
15247 jmp hlt_loop
15248@@ -569,8 +654,11 @@ hlt_loop:
15249 /* This is the default interrupt "handler" :-) */
15250 ALIGN
15251 ignore_int:
15252- cld
15253 #ifdef CONFIG_PRINTK
15254+ cmpl $2,%ss:early_recursion_flag
15255+ je hlt_loop
15256+ incl %ss:early_recursion_flag
15257+ cld
15258 pushl %eax
15259 pushl %ecx
15260 pushl %edx
15261@@ -579,9 +667,6 @@ ignore_int:
15262 movl $(__KERNEL_DS),%eax
15263 movl %eax,%ds
15264 movl %eax,%es
15265- cmpl $2,early_recursion_flag
15266- je hlt_loop
15267- incl early_recursion_flag
15268 pushl 16(%esp)
15269 pushl 24(%esp)
15270 pushl 32(%esp)
15271@@ -600,6 +685,8 @@ ignore_int:
15272 #endif
15273 iret
15274
15275+#include "verify_cpu.S"
15276+
15277 __REFDATA
15278 .align 4
15279 ENTRY(initial_code)
15280@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
15281 /*
15282 * BSS section
15283 */
15284-__PAGE_ALIGNED_BSS
15285- .align PAGE_SIZE_asm
15286 #ifdef CONFIG_X86_PAE
15287+.section .swapper_pg_pmd,"a",@progbits
15288 swapper_pg_pmd:
15289 .fill 1024*KPMDS,4,0
15290 #else
15291+.section .swapper_pg_dir,"a",@progbits
15292 ENTRY(swapper_pg_dir)
15293 .fill 1024,4,0
15294 #endif
15295+.section .swapper_pg_fixmap,"a",@progbits
15296 swapper_pg_fixmap:
15297 .fill 1024,4,0
15298 #ifdef CONFIG_X86_TRAMPOLINE
15299+.section .trampoline_pg_dir,"a",@progbits
15300 ENTRY(trampoline_pg_dir)
15301+#ifdef CONFIG_X86_PAE
15302+ .fill 4,8,0
15303+#else
15304 .fill 1024,4,0
15305 #endif
15306+#endif
15307+
15308+.section .empty_zero_page,"a",@progbits
15309 ENTRY(empty_zero_page)
15310 .fill 4096,1,0
15311
15312 /*
15313+ * The IDT has to be page-aligned to simplify the Pentium
15314+ * F0 0F bug workaround.. We have a special link segment
15315+ * for this.
15316+ */
15317+.section .idt,"a",@progbits
15318+ENTRY(idt_table)
15319+ .fill 256,8,0
15320+
15321+/*
15322 * This starts the data section.
15323 */
15324 #ifdef CONFIG_X86_PAE
15325-__PAGE_ALIGNED_DATA
15326- /* Page-aligned for the benefit of paravirt? */
15327- .align PAGE_SIZE_asm
15328+.section .swapper_pg_dir,"a",@progbits
15329+
15330 ENTRY(swapper_pg_dir)
15331 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15332 # if KPMDS == 3
15333@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
15334 # error "Kernel PMDs should be 1, 2 or 3"
15335 # endif
15336 .align PAGE_SIZE_asm /* needs to be page-sized too */
15337+
15338+#ifdef CONFIG_PAX_PER_CPU_PGD
15339+ENTRY(cpu_pgd)
15340+ .rept NR_CPUS
15341+ .fill 4,8,0
15342+ .endr
15343+#endif
15344+
15345 #endif
15346
15347 .data
15348+.balign 4
15349 ENTRY(stack_start)
15350- .long init_thread_union+THREAD_SIZE
15351- .long __BOOT_DS
15352+ .long init_thread_union+THREAD_SIZE-8
15353
15354 ready: .byte 0
15355
15356+.section .rodata,"a",@progbits
15357 early_recursion_flag:
15358 .long 0
15359
15360@@ -697,7 +809,7 @@ fault_msg:
15361 .word 0 # 32 bit align gdt_desc.address
15362 boot_gdt_descr:
15363 .word __BOOT_DS+7
15364- .long boot_gdt - __PAGE_OFFSET
15365+ .long pa(boot_gdt)
15366
15367 .word 0 # 32-bit align idt_desc.address
15368 idt_descr:
15369@@ -708,7 +820,7 @@ idt_descr:
15370 .word 0 # 32 bit align gdt_desc.address
15371 ENTRY(early_gdt_descr)
15372 .word GDT_ENTRIES*8-1
15373- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
15374+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
15375
15376 /*
15377 * The boot_gdt must mirror the equivalent in setup.S and is
15378@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
15379 .align L1_CACHE_BYTES
15380 ENTRY(boot_gdt)
15381 .fill GDT_ENTRY_BOOT_CS,8,0
15382- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15383- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15384+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15385+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15386+
15387+ .align PAGE_SIZE_asm
15388+ENTRY(cpu_gdt_table)
15389+ .rept NR_CPUS
15390+ .quad 0x0000000000000000 /* NULL descriptor */
15391+ .quad 0x0000000000000000 /* 0x0b reserved */
15392+ .quad 0x0000000000000000 /* 0x13 reserved */
15393+ .quad 0x0000000000000000 /* 0x1b reserved */
15394+
15395+#ifdef CONFIG_PAX_KERNEXEC
15396+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15397+#else
15398+ .quad 0x0000000000000000 /* 0x20 unused */
15399+#endif
15400+
15401+ .quad 0x0000000000000000 /* 0x28 unused */
15402+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15403+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15404+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15405+ .quad 0x0000000000000000 /* 0x4b reserved */
15406+ .quad 0x0000000000000000 /* 0x53 reserved */
15407+ .quad 0x0000000000000000 /* 0x5b reserved */
15408+
15409+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15410+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15411+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15412+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15413+
15414+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15415+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15416+
15417+ /*
15418+ * Segments used for calling PnP BIOS have byte granularity.
15419+ * The code segments and data segments have fixed 64k limits,
15420+ * the transfer segment sizes are set at run time.
15421+ */
15422+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
15423+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
15424+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
15425+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
15426+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
15427+
15428+ /*
15429+ * The APM segments have byte granularity and their bases
15430+ * are set at run time. All have 64k limits.
15431+ */
15432+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15433+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15434+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
15435+
15436+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15437+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15438+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15439+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15440+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15441+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15442+
15443+ /* Be sure this is zeroed to avoid false validations in Xen */
15444+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15445+ .endr
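
Note on the head_32.S hunk above: the per-cpu gdt_page is replaced by a statically initialised cpu_gdt_table, and the code/data descriptors change from 0x...9a/0x...92 to 0x...9b/0x...93, i.e. the accessed bit is pre-set, presumably so the CPU never needs to write it back into a GDT that may live in read-only memory. A small C decoder for those 8-byte descriptor quads, following the standard IA-32 segment descriptor layout:

#include <stdint.h>
#include <stdio.h>

/* Decode one 8-byte GDT descriptor of the kind listed in cpu_gdt_table. */
static void decode(uint64_t d)
{
    uint32_t base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
    uint32_t limit  = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
    unsigned access = (unsigned)((d >> 40) & 0xff);
    unsigned flags  = (unsigned)((d >> 52) & 0xf);

    if (flags & 0x8)                       /* G: 4 KiB granularity */
        limit = (limit << 12) | 0xfff;

    printf("%016llx: base=%08x limit=%08x dpl=%u %s%s accessed=%u\n",
           (unsigned long long)d, base, limit,
           (access >> 5) & 3,
           (access & 0x10) ? "" : "system ",
           (access & 0x08) ? "code" : "data",
           access & 1);
}

int main(void)
{
    decode(0x00cf9b000000ffffULL);   /* kernel code, 0..4 GiB */
    decode(0x00cf93000000ffffULL);   /* kernel data, 0..4 GiB */
    decode(0x00cffb000000ffffULL);   /* user code,   0..4 GiB */
    decode(0x00cff3000000ffffULL);   /* user data,   0..4 GiB */
    return 0;
}
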
15446diff -urNp linux-2.6.32.48/arch/x86/kernel/head_64.S linux-2.6.32.48/arch/x86/kernel/head_64.S
15447--- linux-2.6.32.48/arch/x86/kernel/head_64.S 2011-11-08 19:02:43.000000000 -0500
15448+++ linux-2.6.32.48/arch/x86/kernel/head_64.S 2011-11-15 19:59:43.000000000 -0500
15449@@ -19,6 +19,7 @@
15450 #include <asm/cache.h>
15451 #include <asm/processor-flags.h>
15452 #include <asm/percpu.h>
15453+#include <asm/cpufeature.h>
15454
15455 #ifdef CONFIG_PARAVIRT
15456 #include <asm/asm-offsets.h>
15457@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
15458 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15459 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15460 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15461+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15462+L3_VMALLOC_START = pud_index(VMALLOC_START)
15463+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15464+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15465
15466 .text
15467 __HEAD
15468@@ -85,35 +90,22 @@ startup_64:
15469 */
15470 addq %rbp, init_level4_pgt + 0(%rip)
15471 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15472+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15473+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15474 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15475
15476 addq %rbp, level3_ident_pgt + 0(%rip)
15477+#ifndef CONFIG_XEN
15478+ addq %rbp, level3_ident_pgt + 8(%rip)
15479+#endif
15480
15481- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15482- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15483+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15484
15485- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15486+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15487+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15488
15489- /* Add an Identity mapping if I am above 1G */
15490- leaq _text(%rip), %rdi
15491- andq $PMD_PAGE_MASK, %rdi
15492-
15493- movq %rdi, %rax
15494- shrq $PUD_SHIFT, %rax
15495- andq $(PTRS_PER_PUD - 1), %rax
15496- jz ident_complete
15497-
15498- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15499- leaq level3_ident_pgt(%rip), %rbx
15500- movq %rdx, 0(%rbx, %rax, 8)
15501-
15502- movq %rdi, %rax
15503- shrq $PMD_SHIFT, %rax
15504- andq $(PTRS_PER_PMD - 1), %rax
15505- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15506- leaq level2_spare_pgt(%rip), %rbx
15507- movq %rdx, 0(%rbx, %rax, 8)
15508-ident_complete:
15509+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15510+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15511
15512 /*
15513 * Fixup the kernel text+data virtual addresses. Note that
15514@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15515 * after the boot processor executes this code.
15516 */
15517
15518- /* Enable PAE mode and PGE */
15519- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15520+ /* Enable PAE mode and PSE/PGE */
15521+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15522 movq %rax, %cr4
15523
15524 /* Setup early boot stage 4 level pagetables. */
15525@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15526 movl $MSR_EFER, %ecx
15527 rdmsr
15528 btsl $_EFER_SCE, %eax /* Enable System Call */
15529- btl $20,%edi /* No Execute supported? */
15530+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15531 jnc 1f
15532 btsl $_EFER_NX, %eax
15533+ leaq init_level4_pgt(%rip), %rdi
15534+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15535+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15536+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15537 1: wrmsr /* Make changes effective */
15538
15539 /* Setup cr0 */
15540@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15541 .quad x86_64_start_kernel
15542 ENTRY(initial_gs)
15543 .quad INIT_PER_CPU_VAR(irq_stack_union)
15544- __FINITDATA
15545
15546 ENTRY(stack_start)
15547 .quad init_thread_union+THREAD_SIZE-8
15548 .word 0
15549+ __FINITDATA
15550
15551 bad_address:
15552 jmp bad_address
15553
15554- .section ".init.text","ax"
15555+ __INIT
15556 #ifdef CONFIG_EARLY_PRINTK
15557 .globl early_idt_handlers
15558 early_idt_handlers:
15559@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15560 #endif /* EARLY_PRINTK */
15561 1: hlt
15562 jmp 1b
15563+ .previous
15564
15565 #ifdef CONFIG_EARLY_PRINTK
15566+ __INITDATA
15567 early_recursion_flag:
15568 .long 0
15569+ .previous
15570
15571+ .section .rodata,"a",@progbits
15572 early_idt_msg:
15573 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15574 early_idt_ripmsg:
15575 .asciz "RIP %s\n"
15576-#endif /* CONFIG_EARLY_PRINTK */
15577 .previous
15578+#endif /* CONFIG_EARLY_PRINTK */
15579
15580+ .section .rodata,"a",@progbits
15581 #define NEXT_PAGE(name) \
15582 .balign PAGE_SIZE; \
15583 ENTRY(name)
15584@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15585 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15586 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15587 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15588+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15589+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15590+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15591+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15592 .org init_level4_pgt + L4_START_KERNEL*8, 0
15593 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15594 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15595
15596+#ifdef CONFIG_PAX_PER_CPU_PGD
15597+NEXT_PAGE(cpu_pgd)
15598+ .rept NR_CPUS
15599+ .fill 512,8,0
15600+ .endr
15601+#endif
15602+
15603 NEXT_PAGE(level3_ident_pgt)
15604 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15605+#ifdef CONFIG_XEN
15606 .fill 511,8,0
15607+#else
15608+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15609+ .fill 510,8,0
15610+#endif
15611+
15612+NEXT_PAGE(level3_vmalloc_pgt)
15613+ .fill 512,8,0
15614+
15615+NEXT_PAGE(level3_vmemmap_pgt)
15616+ .fill L3_VMEMMAP_START,8,0
15617+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15618
15619 NEXT_PAGE(level3_kernel_pgt)
15620 .fill L3_START_KERNEL,8,0
15621@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15622 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15623 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15624
15625+NEXT_PAGE(level2_vmemmap_pgt)
15626+ .fill 512,8,0
15627+
15628 NEXT_PAGE(level2_fixmap_pgt)
15629- .fill 506,8,0
15630- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15631- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15632- .fill 5,8,0
15633+ .fill 507,8,0
15634+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15635+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15636+ .fill 4,8,0
15637
15638-NEXT_PAGE(level1_fixmap_pgt)
15639+NEXT_PAGE(level1_vsyscall_pgt)
15640 .fill 512,8,0
15641
15642-NEXT_PAGE(level2_ident_pgt)
15643- /* Since I easily can, map the first 1G.
15644+ /* Since I easily can, map the first 2G.
15645 * Don't set NX because code runs from these pages.
15646 */
15647- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15648+NEXT_PAGE(level2_ident_pgt)
15649+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15650
15651 NEXT_PAGE(level2_kernel_pgt)
15652 /*
15653@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15654 * If you want to increase this then increase MODULES_VADDR
15655 * too.)
15656 */
15657- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15658- KERNEL_IMAGE_SIZE/PMD_SIZE)
15659-
15660-NEXT_PAGE(level2_spare_pgt)
15661- .fill 512, 8, 0
15662+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15663
15664 #undef PMDS
15665 #undef NEXT_PAGE
15666
15667- .data
15668+ .align PAGE_SIZE
15669+ENTRY(cpu_gdt_table)
15670+ .rept NR_CPUS
15671+ .quad 0x0000000000000000 /* NULL descriptor */
15672+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15673+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15674+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15675+ .quad 0x00cffb000000ffff /* __USER32_CS */
15676+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15677+ .quad 0x00affb000000ffff /* __USER_CS */
15678+
15679+#ifdef CONFIG_PAX_KERNEXEC
15680+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15681+#else
15682+ .quad 0x0 /* unused */
15683+#endif
15684+
15685+ .quad 0,0 /* TSS */
15686+ .quad 0,0 /* LDT */
15687+ .quad 0,0,0 /* three TLS descriptors */
15688+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15689+ /* asm/segment.h:GDT_ENTRIES must match this */
15690+
15691+ /* zero the remaining page */
15692+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15693+ .endr
15694+
15695 .align 16
15696 .globl early_gdt_descr
15697 early_gdt_descr:
15698 .word GDT_ENTRIES*8-1
15699 early_gdt_descr_base:
15700- .quad INIT_PER_CPU_VAR(gdt_page)
15701+ .quad cpu_gdt_table
15702
15703 ENTRY(phys_base)
15704 /* This must match the first entry in level2_kernel_pgt */
15705 .quad 0x0000000000000000
15706
15707 #include "../../x86/xen/xen-head.S"
15708-
15709- .section .bss, "aw", @nobits
15710+
15711+ .section .rodata,"a",@progbits
15712 .align L1_CACHE_BYTES
15713 ENTRY(idt_table)
15714- .skip IDT_ENTRIES * 16
15715+ .fill 512,8,0
15716
15717 __PAGE_ALIGNED_BSS
15718 .align PAGE_SIZE
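
Note on the head_64.S hunk above: L4_VMALLOC_START, L3_VMALLOC_START, L4_VMEMMAP_START and L3_VMEMMAP_START are defined as pgd_index()/pud_index() of the region bases so the new vmalloc and vmemmap page tables can be wired up and later marked NX. The indices are just 9-bit slices of the virtual address; a short C sketch, assuming the stock 2.6.32 x86-64 address layout:

#include <stdint.h>
#include <stdio.h>

/* 4-level paging: 9 index bits per level, exactly what pgd_index()/pud_index() do. */
static unsigned pgd_index(uint64_t addr) { return (unsigned)((addr >> 39) & 511); }
static unsigned pud_index(uint64_t addr) { return (unsigned)((addr >> 30) & 511); }

int main(void)
{
    const uint64_t PAGE_OFFSET      = 0xffff880000000000ULL;
    const uint64_t VMALLOC_START    = 0xffffc90000000000ULL;
    const uint64_t VMEMMAP_START    = 0xffffea0000000000ULL;
    const uint64_t START_KERNEL_map = 0xffffffff80000000ULL;

    printf("L4_PAGE_OFFSET   = %u\n", pgd_index(PAGE_OFFSET));
    printf("L4_VMALLOC_START = %u\n", pgd_index(VMALLOC_START));
    printf("L4_VMEMMAP_START = %u\n", pgd_index(VMEMMAP_START));
    printf("L4_START_KERNEL  = %u\n", pgd_index(START_KERNEL_map));   /* 511 */
    printf("L3_START_KERNEL  = %u\n", pud_index(START_KERNEL_map));   /* 510 */
    return 0;
}
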
15719diff -urNp linux-2.6.32.48/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.48/arch/x86/kernel/i386_ksyms_32.c
15720--- linux-2.6.32.48/arch/x86/kernel/i386_ksyms_32.c 2011-11-08 19:02:43.000000000 -0500
15721+++ linux-2.6.32.48/arch/x86/kernel/i386_ksyms_32.c 2011-11-15 19:59:43.000000000 -0500
15722@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15723 EXPORT_SYMBOL(cmpxchg8b_emu);
15724 #endif
15725
15726+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15727+
15728 /* Networking helper routines. */
15729 EXPORT_SYMBOL(csum_partial_copy_generic);
15730+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15731+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15732
15733 EXPORT_SYMBOL(__get_user_1);
15734 EXPORT_SYMBOL(__get_user_2);
15735@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15736
15737 EXPORT_SYMBOL(csum_partial);
15738 EXPORT_SYMBOL(empty_zero_page);
15739+
15740+#ifdef CONFIG_PAX_KERNEXEC
15741+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15742+#endif
15743diff -urNp linux-2.6.32.48/arch/x86/kernel/i8259.c linux-2.6.32.48/arch/x86/kernel/i8259.c
15744--- linux-2.6.32.48/arch/x86/kernel/i8259.c 2011-11-08 19:02:43.000000000 -0500
15745+++ linux-2.6.32.48/arch/x86/kernel/i8259.c 2011-11-15 19:59:43.000000000 -0500
15746@@ -208,7 +208,7 @@ spurious_8259A_irq:
15747 "spurious 8259A interrupt: IRQ%d.\n", irq);
15748 spurious_irq_mask |= irqmask;
15749 }
15750- atomic_inc(&irq_err_count);
15751+ atomic_inc_unchecked(&irq_err_count);
15752 /*
15753 * Theoretically we do not have to handle this IRQ,
15754 * but in Linux this does not cause problems and is
15755diff -urNp linux-2.6.32.48/arch/x86/kernel/init_task.c linux-2.6.32.48/arch/x86/kernel/init_task.c
15756--- linux-2.6.32.48/arch/x86/kernel/init_task.c 2011-11-08 19:02:43.000000000 -0500
15757+++ linux-2.6.32.48/arch/x86/kernel/init_task.c 2011-11-15 19:59:43.000000000 -0500
15758@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15759 * way process stacks are handled. This is done by having a special
15760 * "init_task" linker map entry..
15761 */
15762-union thread_union init_thread_union __init_task_data =
15763- { INIT_THREAD_INFO(init_task) };
15764+union thread_union init_thread_union __init_task_data;
15765
15766 /*
15767 * Initial task structure.
15768@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15769 * section. Since TSS's are completely CPU-local, we want them
15770 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15771 */
15772-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15773-
15774+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15775+EXPORT_SYMBOL(init_tss);
15776diff -urNp linux-2.6.32.48/arch/x86/kernel/ioport.c linux-2.6.32.48/arch/x86/kernel/ioport.c
15777--- linux-2.6.32.48/arch/x86/kernel/ioport.c 2011-11-08 19:02:43.000000000 -0500
15778+++ linux-2.6.32.48/arch/x86/kernel/ioport.c 2011-11-15 19:59:43.000000000 -0500
15779@@ -6,6 +6,7 @@
15780 #include <linux/sched.h>
15781 #include <linux/kernel.h>
15782 #include <linux/capability.h>
15783+#include <linux/security.h>
15784 #include <linux/errno.h>
15785 #include <linux/types.h>
15786 #include <linux/ioport.h>
15787@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15788
15789 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15790 return -EINVAL;
15791+#ifdef CONFIG_GRKERNSEC_IO
15792+ if (turn_on && grsec_disable_privio) {
15793+ gr_handle_ioperm();
15794+ return -EPERM;
15795+ }
15796+#endif
15797 if (turn_on && !capable(CAP_SYS_RAWIO))
15798 return -EPERM;
15799
15800@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15801 * because the ->io_bitmap_max value must match the bitmap
15802 * contents:
15803 */
15804- tss = &per_cpu(init_tss, get_cpu());
15805+ tss = init_tss + get_cpu();
15806
15807 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15808
15809@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15810 return -EINVAL;
15811 /* Trying to gain more privileges? */
15812 if (level > old) {
15813+#ifdef CONFIG_GRKERNSEC_IO
15814+ if (grsec_disable_privio) {
15815+ gr_handle_iopl();
15816+ return -EPERM;
15817+ }
15818+#endif
15819 if (!capable(CAP_SYS_RAWIO))
15820 return -EPERM;
15821 }
15822diff -urNp linux-2.6.32.48/arch/x86/kernel/irq_32.c linux-2.6.32.48/arch/x86/kernel/irq_32.c
15823--- linux-2.6.32.48/arch/x86/kernel/irq_32.c 2011-11-08 19:02:43.000000000 -0500
15824+++ linux-2.6.32.48/arch/x86/kernel/irq_32.c 2011-11-15 19:59:43.000000000 -0500
15825@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15826 __asm__ __volatile__("andl %%esp,%0" :
15827 "=r" (sp) : "0" (THREAD_SIZE - 1));
15828
15829- return sp < (sizeof(struct thread_info) + STACK_WARN);
15830+ return sp < STACK_WARN;
15831 }
15832
15833 static void print_stack_overflow(void)
15834@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15835 * per-CPU IRQ handling contexts (thread information and stack)
15836 */
15837 union irq_ctx {
15838- struct thread_info tinfo;
15839- u32 stack[THREAD_SIZE/sizeof(u32)];
15840-} __attribute__((aligned(PAGE_SIZE)));
15841+ unsigned long previous_esp;
15842+ u32 stack[THREAD_SIZE/sizeof(u32)];
15843+} __attribute__((aligned(THREAD_SIZE)));
15844
15845 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15846 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15847@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15848 static inline int
15849 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15850 {
15851- union irq_ctx *curctx, *irqctx;
15852+ union irq_ctx *irqctx;
15853 u32 *isp, arg1, arg2;
15854
15855- curctx = (union irq_ctx *) current_thread_info();
15856 irqctx = __get_cpu_var(hardirq_ctx);
15857
15858 /*
15859@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15860 * handler) we can't do that and just have to keep using the
15861 * current stack (which is the irq stack already after all)
15862 */
15863- if (unlikely(curctx == irqctx))
15864+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15865 return 0;
15866
15867 /* build the stack frame on the IRQ stack */
15868- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15869- irqctx->tinfo.task = curctx->tinfo.task;
15870- irqctx->tinfo.previous_esp = current_stack_pointer;
15871+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15872+ irqctx->previous_esp = current_stack_pointer;
15873
15874- /*
15875- * Copy the softirq bits in preempt_count so that the
15876- * softirq checks work in the hardirq context.
15877- */
15878- irqctx->tinfo.preempt_count =
15879- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15880- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15881+#ifdef CONFIG_PAX_MEMORY_UDEREF
15882+ __set_fs(MAKE_MM_SEG(0));
15883+#endif
15884
15885 if (unlikely(overflow))
15886 call_on_stack(print_stack_overflow, isp);
15887@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15888 : "0" (irq), "1" (desc), "2" (isp),
15889 "D" (desc->handle_irq)
15890 : "memory", "cc", "ecx");
15891+
15892+#ifdef CONFIG_PAX_MEMORY_UDEREF
15893+ __set_fs(current_thread_info()->addr_limit);
15894+#endif
15895+
15896 return 1;
15897 }
15898
15899@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15900 */
15901 void __cpuinit irq_ctx_init(int cpu)
15902 {
15903- union irq_ctx *irqctx;
15904-
15905 if (per_cpu(hardirq_ctx, cpu))
15906 return;
15907
15908- irqctx = &per_cpu(hardirq_stack, cpu);
15909- irqctx->tinfo.task = NULL;
15910- irqctx->tinfo.exec_domain = NULL;
15911- irqctx->tinfo.cpu = cpu;
15912- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15913- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15914-
15915- per_cpu(hardirq_ctx, cpu) = irqctx;
15916-
15917- irqctx = &per_cpu(softirq_stack, cpu);
15918- irqctx->tinfo.task = NULL;
15919- irqctx->tinfo.exec_domain = NULL;
15920- irqctx->tinfo.cpu = cpu;
15921- irqctx->tinfo.preempt_count = 0;
15922- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15923-
15924- per_cpu(softirq_ctx, cpu) = irqctx;
15925+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15926+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15927
15928 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15929 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15930@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15931 asmlinkage void do_softirq(void)
15932 {
15933 unsigned long flags;
15934- struct thread_info *curctx;
15935 union irq_ctx *irqctx;
15936 u32 *isp;
15937
15938@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15939 local_irq_save(flags);
15940
15941 if (local_softirq_pending()) {
15942- curctx = current_thread_info();
15943 irqctx = __get_cpu_var(softirq_ctx);
15944- irqctx->tinfo.task = curctx->task;
15945- irqctx->tinfo.previous_esp = current_stack_pointer;
15946+ irqctx->previous_esp = current_stack_pointer;
15947
15948 /* build the stack frame on the softirq stack */
15949- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15950+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15951+
15952+#ifdef CONFIG_PAX_MEMORY_UDEREF
15953+ __set_fs(MAKE_MM_SEG(0));
15954+#endif
15955
15956 call_on_stack(__do_softirq, isp);
15957+
15958+#ifdef CONFIG_PAX_MEMORY_UDEREF
15959+ __set_fs(current_thread_info()->addr_limit);
15960+#endif
15961+
15962 /*
15963 * Shouldnt happen, we returned above if in_interrupt():
15964 */
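
Note on the irq_32.c hunk above: the "already on the IRQ stack?" test no longer compares thread_info pointers; it checks whether the current stack pointer falls inside the IRQ context's THREAD_SIZE-sized area, which works because the irq_ctx union is now THREAD_SIZE-aligned. The same test in plain C, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192              /* two pages, as on 32-bit x86 */

/* Unsigned subtract-and-compare: true iff sp is in [irqctx, irqctx + THREAD_SIZE). */
static int already_on_irq_stack(uintptr_t sp, uintptr_t irqctx)
{
    return sp - irqctx < THREAD_SIZE;
}

int main(void)
{
    uintptr_t irqctx = 0x10000000;    /* hypothetical, THREAD_SIZE-aligned IRQ stack */

    printf("%d\n", already_on_irq_stack(irqctx + 0x100, irqctx));  /* 1: inside  */
    printf("%d\n", already_on_irq_stack(irqctx - 0x100, irqctx));  /* 0: outside */
    return 0;
}
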
15965diff -urNp linux-2.6.32.48/arch/x86/kernel/irq.c linux-2.6.32.48/arch/x86/kernel/irq.c
15966--- linux-2.6.32.48/arch/x86/kernel/irq.c 2011-11-08 19:02:43.000000000 -0500
15967+++ linux-2.6.32.48/arch/x86/kernel/irq.c 2011-11-15 19:59:43.000000000 -0500
15968@@ -15,7 +15,7 @@
15969 #include <asm/mce.h>
15970 #include <asm/hw_irq.h>
15971
15972-atomic_t irq_err_count;
15973+atomic_unchecked_t irq_err_count;
15974
15975 /* Function pointer for generic interrupt vector handling */
15976 void (*generic_interrupt_extension)(void) = NULL;
15977@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15978 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15979 seq_printf(p, " Machine check polls\n");
15980 #endif
15981- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15982+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15983 #if defined(CONFIG_X86_IO_APIC)
15984- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15985+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15986 #endif
15987 return 0;
15988 }
15989@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15990
15991 u64 arch_irq_stat(void)
15992 {
15993- u64 sum = atomic_read(&irq_err_count);
15994+ u64 sum = atomic_read_unchecked(&irq_err_count);
15995
15996 #ifdef CONFIG_X86_IO_APIC
15997- sum += atomic_read(&irq_mis_count);
15998+ sum += atomic_read_unchecked(&irq_mis_count);
15999 #endif
16000 return sum;
16001 }
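
Note on the irq.c and i8259.c hunks above: irq_err_count and irq_mis_count are converted from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT hardening an ordinary atomic increment is overflow-checked, so pure statistics counters that are allowed to wrap are moved to the unchecked variant. A rough user-space model of the distinction (a sketch only; the real primitives are inline asm, and the helper names here are invented):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int *v)          /* models atomic_inc() under REFCOUNT */
{
    int next;
    if (__builtin_add_overflow(*v, 1, &next)) {   /* GCC/Clang builtin */
        fprintf(stderr, "refcount overflow detected, aborting\n");
        abort();
    }
    return *v = next;
}

static int unchecked_inc(int *v)        /* models atomic_inc_unchecked() */
{
    return *v = (int)((unsigned)*v + 1);          /* wraps silently */
}

int main(void)
{
    int stat = INT_MAX;
    printf("unchecked: %d wraps to %d\n", INT_MAX, unchecked_inc(&stat));

    int ref = INT_MAX;
    checked_inc(&ref);                  /* aborts instead of wrapping */
    return 0;
}
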
16002diff -urNp linux-2.6.32.48/arch/x86/kernel/kgdb.c linux-2.6.32.48/arch/x86/kernel/kgdb.c
16003--- linux-2.6.32.48/arch/x86/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
16004+++ linux-2.6.32.48/arch/x86/kernel/kgdb.c 2011-11-15 19:59:43.000000000 -0500
16005@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
16006
16007 /* clear the trace bit */
16008 linux_regs->flags &= ~X86_EFLAGS_TF;
16009- atomic_set(&kgdb_cpu_doing_single_step, -1);
16010+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
16011
16012 /* set the trace bit if we're stepping */
16013 if (remcomInBuffer[0] == 's') {
16014 linux_regs->flags |= X86_EFLAGS_TF;
16015 kgdb_single_step = 1;
16016- atomic_set(&kgdb_cpu_doing_single_step,
16017+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
16018 raw_smp_processor_id());
16019 }
16020
16021@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
16022 break;
16023
16024 case DIE_DEBUG:
16025- if (atomic_read(&kgdb_cpu_doing_single_step) ==
16026+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
16027 raw_smp_processor_id()) {
16028 if (user_mode(regs))
16029 return single_step_cont(regs, args);
16030@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
16031 return instruction_pointer(regs);
16032 }
16033
16034-struct kgdb_arch arch_kgdb_ops = {
16035+const struct kgdb_arch arch_kgdb_ops = {
16036 /* Breakpoint instruction: */
16037 .gdb_bpt_instr = { 0xcc },
16038 .flags = KGDB_HW_BREAKPOINT,
16039diff -urNp linux-2.6.32.48/arch/x86/kernel/kprobes.c linux-2.6.32.48/arch/x86/kernel/kprobes.c
16040--- linux-2.6.32.48/arch/x86/kernel/kprobes.c 2011-11-08 19:02:43.000000000 -0500
16041+++ linux-2.6.32.48/arch/x86/kernel/kprobes.c 2011-11-15 19:59:43.000000000 -0500
16042@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *f
16043 char op;
16044 s32 raddr;
16045 } __attribute__((packed)) * jop;
16046- jop = (struct __arch_jmp_op *)from;
16047+
16048+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
16049+
16050+ pax_open_kernel();
16051 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
16052 jop->op = RELATIVEJUMP_INSTRUCTION;
16053+ pax_close_kernel();
16054 }
16055
16056 /*
16057@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_op
16058 kprobe_opcode_t opcode;
16059 kprobe_opcode_t *orig_opcodes = opcodes;
16060
16061- if (search_exception_tables((unsigned long)opcodes))
16062+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
16063 return 0; /* Page fault may occur on this address. */
16064
16065 retry:
16066@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct
16067 disp = (u8 *) p->addr + *((s32 *) insn) -
16068 (u8 *) p->ainsn.insn;
16069 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
16070+ pax_open_kernel();
16071 *(s32 *)insn = (s32) disp;
16072+ pax_close_kernel();
16073 }
16074 }
16075 #endif
16076@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct
16077
16078 static void __kprobes arch_copy_kprobe(struct kprobe *p)
16079 {
16080- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
16081+ pax_open_kernel();
16082+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
16083+ pax_close_kernel();
16084
16085 fix_riprel(p);
16086
16087- if (can_boost(p->addr))
16088+ if (can_boost(ktla_ktva(p->addr)))
16089 p->ainsn.boostable = 0;
16090 else
16091 p->ainsn.boostable = -1;
16092
16093- p->opcode = *p->addr;
16094+ p->opcode = *(ktla_ktva(p->addr));
16095 }
16096
16097 int __kprobes arch_prepare_kprobe(struct kprobe *p)
16098@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep
16099 if (p->opcode == BREAKPOINT_INSTRUCTION)
16100 regs->ip = (unsigned long)p->addr;
16101 else
16102- regs->ip = (unsigned long)p->ainsn.insn;
16103+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
16104 }
16105
16106 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
16107@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(s
16108 if (p->ainsn.boostable == 1 && !p->post_handler) {
16109 /* Boost up -- we can execute copied instructions directly */
16110 reset_current_kprobe();
16111- regs->ip = (unsigned long)p->ainsn.insn;
16112+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
16113 preempt_enable_no_resched();
16114 return;
16115 }
16116@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(stru
16117 struct kprobe_ctlblk *kcb;
16118
16119 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
16120- if (*addr != BREAKPOINT_INSTRUCTION) {
16121+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
16122 /*
16123 * The breakpoint instruction was removed right
16124 * after we hit it. Another cpu has removed
16125@@ -777,7 +785,7 @@ static void __kprobes resume_execution(s
16126 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
16127 {
16128 unsigned long *tos = stack_addr(regs);
16129- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
16130+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
16131 unsigned long orig_ip = (unsigned long)p->addr;
16132 kprobe_opcode_t *insn = p->ainsn.insn;
16133
16134@@ -960,7 +968,7 @@ int __kprobes kprobe_exceptions_notify(s
16135 struct die_args *args = data;
16136 int ret = NOTIFY_DONE;
16137
16138- if (args->regs && user_mode_vm(args->regs))
16139+ if (args->regs && user_mode(args->regs))
16140 return ret;
16141
16142 switch (val) {
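
Note: every place kprobes writes into instruction memory is rerouted through the writable alias (ktla_ktva()) and bracketed with pax_open_kernel()/pax_close_kernel(), since under KERNEXEC the executing view of kernel text is not writable. A userspace analog of patching through an alias mapping while reading via a read-only one, assuming a Linux host with memfd_create() (an illustration of the aliasing idea, not the kernel's mechanism; error handling is trimmed for brevity):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = memfd_create("text", 0);
        ftruncate(fd, 4096);

        /* "kernel text": mapped read-only, like the KERNEXEC view of .text   */
        unsigned char *ro = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        /* writable alias of the same pages, playing the role of ktla_ktva() */
        unsigned char *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, fd, 0);

        rw[0] = 0xcc;                     /* int3, the kprobes breakpoint byte */
        printf("ro[0] = %#x\n", ro[0]);   /* the read-only view sees the write */

        munmap(ro, 4096);
        munmap(rw, 4096);
        close(fd);
        return 0;
    }
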
16143diff -urNp linux-2.6.32.48/arch/x86/kernel/kvm.c linux-2.6.32.48/arch/x86/kernel/kvm.c
16144--- linux-2.6.32.48/arch/x86/kernel/kvm.c 2011-11-08 19:02:43.000000000 -0500
16145+++ linux-2.6.32.48/arch/x86/kernel/kvm.c 2011-11-15 19:59:43.000000000 -0500
16146@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(vo
16147 pv_mmu_ops.set_pud = kvm_set_pud;
16148 #if PAGETABLE_LEVELS == 4
16149 pv_mmu_ops.set_pgd = kvm_set_pgd;
16150+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16151 #endif
16152 #endif
16153 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16154diff -urNp linux-2.6.32.48/arch/x86/kernel/ldt.c linux-2.6.32.48/arch/x86/kernel/ldt.c
16155--- linux-2.6.32.48/arch/x86/kernel/ldt.c 2011-11-08 19:02:43.000000000 -0500
16156+++ linux-2.6.32.48/arch/x86/kernel/ldt.c 2011-11-15 19:59:43.000000000 -0500
16157@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
16158 if (reload) {
16159 #ifdef CONFIG_SMP
16160 preempt_disable();
16161- load_LDT(pc);
16162+ load_LDT_nolock(pc);
16163 if (!cpumask_equal(mm_cpumask(current->mm),
16164 cpumask_of(smp_processor_id())))
16165 smp_call_function(flush_ldt, current->mm, 1);
16166 preempt_enable();
16167 #else
16168- load_LDT(pc);
16169+ load_LDT_nolock(pc);
16170 #endif
16171 }
16172 if (oldsize) {
16173@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
16174 return err;
16175
16176 for (i = 0; i < old->size; i++)
16177- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16178+ write_ldt_entry(new->ldt, i, old->ldt + i);
16179 return 0;
16180 }
16181
16182@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
16183 retval = copy_ldt(&mm->context, &old_mm->context);
16184 mutex_unlock(&old_mm->context.lock);
16185 }
16186+
16187+ if (tsk == current) {
16188+ mm->context.vdso = 0;
16189+
16190+#ifdef CONFIG_X86_32
16191+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16192+ mm->context.user_cs_base = 0UL;
16193+ mm->context.user_cs_limit = ~0UL;
16194+
16195+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16196+ cpus_clear(mm->context.cpu_user_cs_mask);
16197+#endif
16198+
16199+#endif
16200+#endif
16201+
16202+ }
16203+
16204 return retval;
16205 }
16206
16207@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
16208 }
16209 }
16210
16211+#ifdef CONFIG_PAX_SEGMEXEC
16212+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16213+ error = -EINVAL;
16214+ goto out_unlock;
16215+ }
16216+#endif
16217+
16218 fill_ldt(&ldt, &ldt_info);
16219 if (oldmode)
16220 ldt.avl = 0;
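
Note: besides switching to load_LDT_nolock() and clearing the per-mm PaX segmentation state in init_new_context(), write_ldt() now refuses, for SEGMEXEC tasks, LDT descriptors whose contents field marks them as code segments (MODIFY_LDT_CONTENTS_CODE), returning -EINVAL. A userspace sketch of the kind of modify_ldt(2) request that check targets, for an x86 host (hypothetical demo; stock kernels normally accept it, the patched kernel rejects it for SEGMEXEC tasks):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>      /* struct user_desc, MODIFY_LDT_CONTENTS_CODE */

    int main(void)
    {
        struct user_desc d;
        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;
        d.base_addr      = 0;
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.contents       = MODIFY_LDT_CONTENTS_CODE;  /* the field the hunk tests */
        d.limit_in_pages = 1;
        d.useable        = 1;

        /* modify_ldt(1, ...) installs the descriptor. */
        long ret = syscall(SYS_modify_ldt, 1, &d, sizeof(d));
        printf("modify_ldt -> %ld\n", ret);
        return 0;
    }
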
16221diff -urNp linux-2.6.32.48/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.48/arch/x86/kernel/machine_kexec_32.c
16222--- linux-2.6.32.48/arch/x86/kernel/machine_kexec_32.c 2011-11-08 19:02:43.000000000 -0500
16223+++ linux-2.6.32.48/arch/x86/kernel/machine_kexec_32.c 2011-11-15 19:59:43.000000000 -0500
16224@@ -26,7 +26,7 @@
16225 #include <asm/system.h>
16226 #include <asm/cacheflush.h>
16227
16228-static void set_idt(void *newidt, __u16 limit)
16229+static void set_idt(struct desc_struct *newidt, __u16 limit)
16230 {
16231 struct desc_ptr curidt;
16232
16233@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
16234 }
16235
16236
16237-static void set_gdt(void *newgdt, __u16 limit)
16238+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16239 {
16240 struct desc_ptr curgdt;
16241
16242@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16243 }
16244
16245 control_page = page_address(image->control_code_page);
16246- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16247+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16248
16249 relocate_kernel_ptr = control_page;
16250 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16251diff -urNp linux-2.6.32.48/arch/x86/kernel/microcode_amd.c linux-2.6.32.48/arch/x86/kernel/microcode_amd.c
16252--- linux-2.6.32.48/arch/x86/kernel/microcode_amd.c 2011-11-08 19:02:43.000000000 -0500
16253+++ linux-2.6.32.48/arch/x86/kernel/microcode_amd.c 2011-11-15 19:59:43.000000000 -0500
16254@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
16255 uci->mc = NULL;
16256 }
16257
16258-static struct microcode_ops microcode_amd_ops = {
16259+static const struct microcode_ops microcode_amd_ops = {
16260 .request_microcode_user = request_microcode_user,
16261 .request_microcode_fw = request_microcode_fw,
16262 .collect_cpu_info = collect_cpu_info_amd,
16263@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
16264 .microcode_fini_cpu = microcode_fini_cpu_amd,
16265 };
16266
16267-struct microcode_ops * __init init_amd_microcode(void)
16268+const struct microcode_ops * __init init_amd_microcode(void)
16269 {
16270 return &microcode_amd_ops;
16271 }
16272diff -urNp linux-2.6.32.48/arch/x86/kernel/microcode_core.c linux-2.6.32.48/arch/x86/kernel/microcode_core.c
16273--- linux-2.6.32.48/arch/x86/kernel/microcode_core.c 2011-11-08 19:02:43.000000000 -0500
16274+++ linux-2.6.32.48/arch/x86/kernel/microcode_core.c 2011-11-15 19:59:43.000000000 -0500
16275@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
16276
16277 #define MICROCODE_VERSION "2.00"
16278
16279-static struct microcode_ops *microcode_ops;
16280+static const struct microcode_ops *microcode_ops;
16281
16282 /*
16283 * Synchronization.
16284diff -urNp linux-2.6.32.48/arch/x86/kernel/microcode_intel.c linux-2.6.32.48/arch/x86/kernel/microcode_intel.c
16285--- linux-2.6.32.48/arch/x86/kernel/microcode_intel.c 2011-11-08 19:02:43.000000000 -0500
16286+++ linux-2.6.32.48/arch/x86/kernel/microcode_intel.c 2011-11-15 19:59:43.000000000 -0500
16287@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
16288
16289 static int get_ucode_user(void *to, const void *from, size_t n)
16290 {
16291- return copy_from_user(to, from, n);
16292+ return copy_from_user(to, (const void __force_user *)from, n);
16293 }
16294
16295 static enum ucode_state
16296 request_microcode_user(int cpu, const void __user *buf, size_t size)
16297 {
16298- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16299+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16300 }
16301
16302 static void microcode_fini_cpu(int cpu)
16303@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
16304 uci->mc = NULL;
16305 }
16306
16307-static struct microcode_ops microcode_intel_ops = {
16308+static const struct microcode_ops microcode_intel_ops = {
16309 .request_microcode_user = request_microcode_user,
16310 .request_microcode_fw = request_microcode_fw,
16311 .collect_cpu_info = collect_cpu_info,
16312@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
16313 .microcode_fini_cpu = microcode_fini_cpu,
16314 };
16315
16316-struct microcode_ops * __init init_intel_microcode(void)
16317+const struct microcode_ops * __init init_intel_microcode(void)
16318 {
16319 return &microcode_intel_ops;
16320 }
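
Note: the microcode_ops tables (like the kgdb and DMA ops earlier) are only ever read, so declaring them const lets the linker place them in .rodata, where their function pointers cannot be retargeted at run time. A minimal illustration with hypothetical names:

    #include <stdio.h>

    struct ops {
        int (*probe)(void);
    };

    static int real_probe(void) { return 42; }

    /* Lives in .rodata: reassigning .probe at run time faults instead of
     * silently redirecting the call. */
    static const struct ops good_ops = { .probe = real_probe };

    /* Writable ops table: a stray or malicious write can hijack the pointer. */
    static struct ops bad_ops = { .probe = real_probe };

    int main(void)
    {
        printf("%d %d\n", good_ops.probe(), bad_ops.probe());
        /* good_ops.probe = NULL;      // rejected by the compiler            */
        bad_ops.probe = NULL;          /* compiles fine; later call would crash */
        return 0;
    }
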
16321diff -urNp linux-2.6.32.48/arch/x86/kernel/module.c linux-2.6.32.48/arch/x86/kernel/module.c
16322--- linux-2.6.32.48/arch/x86/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
16323+++ linux-2.6.32.48/arch/x86/kernel/module.c 2011-11-15 19:59:43.000000000 -0500
16324@@ -34,7 +34,7 @@
16325 #define DEBUGP(fmt...)
16326 #endif
16327
16328-void *module_alloc(unsigned long size)
16329+static void *__module_alloc(unsigned long size, pgprot_t prot)
16330 {
16331 struct vm_struct *area;
16332
16333@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
16334 if (!area)
16335 return NULL;
16336
16337- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
16338- PAGE_KERNEL_EXEC);
16339+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
16340+}
16341+
16342+void *module_alloc(unsigned long size)
16343+{
16344+
16345+#ifdef CONFIG_PAX_KERNEXEC
16346+ return __module_alloc(size, PAGE_KERNEL);
16347+#else
16348+ return __module_alloc(size, PAGE_KERNEL_EXEC);
16349+#endif
16350+
16351 }
16352
16353 /* Free memory returned from module_alloc */
16354@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
16355 vfree(module_region);
16356 }
16357
16358+#ifdef CONFIG_PAX_KERNEXEC
16359+#ifdef CONFIG_X86_32
16360+void *module_alloc_exec(unsigned long size)
16361+{
16362+ struct vm_struct *area;
16363+
16364+ if (size == 0)
16365+ return NULL;
16366+
16367+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16368+ return area ? area->addr : NULL;
16369+}
16370+EXPORT_SYMBOL(module_alloc_exec);
16371+
16372+void module_free_exec(struct module *mod, void *module_region)
16373+{
16374+ vunmap(module_region);
16375+}
16376+EXPORT_SYMBOL(module_free_exec);
16377+#else
16378+void module_free_exec(struct module *mod, void *module_region)
16379+{
16380+ module_free(mod, module_region);
16381+}
16382+EXPORT_SYMBOL(module_free_exec);
16383+
16384+void *module_alloc_exec(unsigned long size)
16385+{
16386+ return __module_alloc(size, PAGE_KERNEL_RX);
16387+}
16388+EXPORT_SYMBOL(module_alloc_exec);
16389+#endif
16390+#endif
16391+
16392 /* We don't need anything special. */
16393 int module_frob_arch_sections(Elf_Ehdr *hdr,
16394 Elf_Shdr *sechdrs,
16395@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16396 unsigned int i;
16397 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16398 Elf32_Sym *sym;
16399- uint32_t *location;
16400+ uint32_t *plocation, location;
16401
16402 DEBUGP("Applying relocate section %u to %u\n", relsec,
16403 sechdrs[relsec].sh_info);
16404 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16405 /* This is where to make the change */
16406- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16407- + rel[i].r_offset;
16408+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16409+ location = (uint32_t)plocation;
16410+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16411+ plocation = ktla_ktva((void *)plocation);
16412 /* This is the symbol it is referring to. Note that all
16413 undefined symbols have been resolved. */
16414 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16415@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16416 switch (ELF32_R_TYPE(rel[i].r_info)) {
16417 case R_386_32:
16418 /* We add the value into the location given */
16419- *location += sym->st_value;
16420+ pax_open_kernel();
16421+ *plocation += sym->st_value;
16422+ pax_close_kernel();
16423 break;
16424 case R_386_PC32:
16425 		/* Add the value, subtract its position */
16426- *location += sym->st_value - (uint32_t)location;
16427+ pax_open_kernel();
16428+ *plocation += sym->st_value - location;
16429+ pax_close_kernel();
16430 break;
16431 default:
16432 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16433@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
16434 case R_X86_64_NONE:
16435 break;
16436 case R_X86_64_64:
16437+ pax_open_kernel();
16438 *(u64 *)loc = val;
16439+ pax_close_kernel();
16440 break;
16441 case R_X86_64_32:
16442+ pax_open_kernel();
16443 *(u32 *)loc = val;
16444+ pax_close_kernel();
16445 if (val != *(u32 *)loc)
16446 goto overflow;
16447 break;
16448 case R_X86_64_32S:
16449+ pax_open_kernel();
16450 *(s32 *)loc = val;
16451+ pax_close_kernel();
16452 if ((s64)val != *(s32 *)loc)
16453 goto overflow;
16454 break;
16455 case R_X86_64_PC32:
16456 val -= (u64)loc;
16457+ pax_open_kernel();
16458 *(u32 *)loc = val;
16459+ pax_close_kernel();
16460+
16461 #if 0
16462 if ((s64)val != *(s32 *)loc)
16463 goto overflow;
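
Note: module_alloc() no longer hands out memory that is writable and executable at once. Under KERNEXEC, data allocations use PAGE_KERNEL and module code goes through a separate module_alloc_exec() (a dedicated executable VA window on 32-bit, PAGE_KERNEL_RX on 64-bit), with relocations applied under pax_open_kernel(). A userspace sketch of the same W^X discipline using mmap()/mprotect() (an illustrative stand-in for the kernel protections, not the kernel's code):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;

        /* Stage the "module code" in a writable, non-executable buffer ...   */
        unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;
        buf[0] = 0xc3;                   /* x86 'ret', a trivial function body */

        /* ... then flip it to read+execute; it is never writable and
         * executable at the same time, mirroring the PAGE_KERNEL /
         * PAGE_KERNEL_RX split above. */
        if (mprotect(buf, len, PROT_READ | PROT_EXEC) != 0)
            return 1;

        int (*fn)(void) = (int (*)(void))buf;
        fn();                            /* executes the staged 'ret' */
        printf("executed staged code without ever holding W+X\n");

        munmap(buf, len);
        return 0;
    }
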
16464diff -urNp linux-2.6.32.48/arch/x86/kernel/paravirt.c linux-2.6.32.48/arch/x86/kernel/paravirt.c
16465--- linux-2.6.32.48/arch/x86/kernel/paravirt.c 2011-11-08 19:02:43.000000000 -0500
16466+++ linux-2.6.32.48/arch/x86/kernel/paravirt.c 2011-11-15 19:59:43.000000000 -0500
16467@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16468 {
16469 return x;
16470 }
16471+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16472+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16473+#endif
16474
16475 void __init default_banner(void)
16476 {
16477@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
16478 * corresponding structure. */
16479 static void *get_call_destination(u8 type)
16480 {
16481- struct paravirt_patch_template tmpl = {
16482+ const struct paravirt_patch_template tmpl = {
16483 .pv_init_ops = pv_init_ops,
16484 .pv_time_ops = pv_time_ops,
16485 .pv_cpu_ops = pv_cpu_ops,
16486@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
16487 .pv_lock_ops = pv_lock_ops,
16488 #endif
16489 };
16490+
16491+ pax_track_stack();
16492 return *((void **)&tmpl + type);
16493 }
16494
16495@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
16496 if (opfunc == NULL)
16497 /* If there's no function, patch it with a ud2a (BUG) */
16498 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16499- else if (opfunc == _paravirt_nop)
16500+ else if (opfunc == (void *)_paravirt_nop)
16501 /* If the operation is a nop, then nop the callsite */
16502 ret = paravirt_patch_nop();
16503
16504 /* identity functions just return their single argument */
16505- else if (opfunc == _paravirt_ident_32)
16506+ else if (opfunc == (void *)_paravirt_ident_32)
16507 ret = paravirt_patch_ident_32(insnbuf, len);
16508- else if (opfunc == _paravirt_ident_64)
16509+ else if (opfunc == (void *)_paravirt_ident_64)
16510+ ret = paravirt_patch_ident_64(insnbuf, len);
16511+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16512+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16513 ret = paravirt_patch_ident_64(insnbuf, len);
16514+#endif
16515
16516 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16517 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16518@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16519 if (insn_len > len || start == NULL)
16520 insn_len = len;
16521 else
16522- memcpy(insnbuf, start, insn_len);
16523+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16524
16525 return insn_len;
16526 }
16527@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16528 preempt_enable();
16529 }
16530
16531-struct pv_info pv_info = {
16532+struct pv_info pv_info __read_only = {
16533 .name = "bare hardware",
16534 .paravirt_enabled = 0,
16535 .kernel_rpl = 0,
16536 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16537 };
16538
16539-struct pv_init_ops pv_init_ops = {
16540+struct pv_init_ops pv_init_ops __read_only = {
16541 .patch = native_patch,
16542 };
16543
16544-struct pv_time_ops pv_time_ops = {
16545+struct pv_time_ops pv_time_ops __read_only = {
16546 .sched_clock = native_sched_clock,
16547 };
16548
16549-struct pv_irq_ops pv_irq_ops = {
16550+struct pv_irq_ops pv_irq_ops __read_only = {
16551 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16552 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16553 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16554@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16555 #endif
16556 };
16557
16558-struct pv_cpu_ops pv_cpu_ops = {
16559+struct pv_cpu_ops pv_cpu_ops __read_only = {
16560 .cpuid = native_cpuid,
16561 .get_debugreg = native_get_debugreg,
16562 .set_debugreg = native_set_debugreg,
16563@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16564 .end_context_switch = paravirt_nop,
16565 };
16566
16567-struct pv_apic_ops pv_apic_ops = {
16568+struct pv_apic_ops pv_apic_ops __read_only = {
16569 #ifdef CONFIG_X86_LOCAL_APIC
16570 .startup_ipi_hook = paravirt_nop,
16571 #endif
16572 };
16573
16574-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16575+#ifdef CONFIG_X86_32
16576+#ifdef CONFIG_X86_PAE
16577+/* 64-bit pagetable entries */
16578+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16579+#else
16580 /* 32-bit pagetable entries */
16581 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16582+#endif
16583 #else
16584 /* 64-bit pagetable entries */
16585 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16586 #endif
16587
16588-struct pv_mmu_ops pv_mmu_ops = {
16589+struct pv_mmu_ops pv_mmu_ops __read_only = {
16590
16591 .read_cr2 = native_read_cr2,
16592 .write_cr2 = native_write_cr2,
16593@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16594 .make_pud = PTE_IDENT,
16595
16596 .set_pgd = native_set_pgd,
16597+ .set_pgd_batched = native_set_pgd_batched,
16598 #endif
16599 #endif /* PAGETABLE_LEVELS >= 3 */
16600
16601@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16602 },
16603
16604 .set_fixmap = native_set_fixmap,
16605+
16606+#ifdef CONFIG_PAX_KERNEXEC
16607+ .pax_open_kernel = native_pax_open_kernel,
16608+ .pax_close_kernel = native_pax_close_kernel,
16609+#endif
16610+
16611 };
16612
16613 EXPORT_SYMBOL_GPL(pv_time_ops);
16614diff -urNp linux-2.6.32.48/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.48/arch/x86/kernel/paravirt-spinlocks.c
16615--- linux-2.6.32.48/arch/x86/kernel/paravirt-spinlocks.c 2011-11-08 19:02:43.000000000 -0500
16616+++ linux-2.6.32.48/arch/x86/kernel/paravirt-spinlocks.c 2011-11-15 19:59:43.000000000 -0500
16617@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16618 __raw_spin_lock(lock);
16619 }
16620
16621-struct pv_lock_ops pv_lock_ops = {
16622+struct pv_lock_ops pv_lock_ops __read_only = {
16623 #ifdef CONFIG_SMP
16624 .spin_is_locked = __ticket_spin_is_locked,
16625 .spin_is_contended = __ticket_spin_is_contended,
16626diff -urNp linux-2.6.32.48/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.48/arch/x86/kernel/pci-calgary_64.c
16627--- linux-2.6.32.48/arch/x86/kernel/pci-calgary_64.c 2011-11-08 19:02:43.000000000 -0500
16628+++ linux-2.6.32.48/arch/x86/kernel/pci-calgary_64.c 2011-11-15 19:59:43.000000000 -0500
16629@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16630 free_pages((unsigned long)vaddr, get_order(size));
16631 }
16632
16633-static struct dma_map_ops calgary_dma_ops = {
16634+static const struct dma_map_ops calgary_dma_ops = {
16635 .alloc_coherent = calgary_alloc_coherent,
16636 .free_coherent = calgary_free_coherent,
16637 .map_sg = calgary_map_sg,
16638diff -urNp linux-2.6.32.48/arch/x86/kernel/pci-dma.c linux-2.6.32.48/arch/x86/kernel/pci-dma.c
16639--- linux-2.6.32.48/arch/x86/kernel/pci-dma.c 2011-11-08 19:02:43.000000000 -0500
16640+++ linux-2.6.32.48/arch/x86/kernel/pci-dma.c 2011-11-15 19:59:43.000000000 -0500
16641@@ -14,7 +14,7 @@
16642
16643 static int forbid_dac __read_mostly;
16644
16645-struct dma_map_ops *dma_ops;
16646+const struct dma_map_ops *dma_ops;
16647 EXPORT_SYMBOL(dma_ops);
16648
16649 static int iommu_sac_force __read_mostly;
16650@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16651
16652 int dma_supported(struct device *dev, u64 mask)
16653 {
16654- struct dma_map_ops *ops = get_dma_ops(dev);
16655+ const struct dma_map_ops *ops = get_dma_ops(dev);
16656
16657 #ifdef CONFIG_PCI
16658 if (mask > 0xffffffff && forbid_dac > 0) {
16659diff -urNp linux-2.6.32.48/arch/x86/kernel/pci-gart_64.c linux-2.6.32.48/arch/x86/kernel/pci-gart_64.c
16660--- linux-2.6.32.48/arch/x86/kernel/pci-gart_64.c 2011-11-08 19:02:43.000000000 -0500
16661+++ linux-2.6.32.48/arch/x86/kernel/pci-gart_64.c 2011-11-15 19:59:43.000000000 -0500
16662@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16663 return -1;
16664 }
16665
16666-static struct dma_map_ops gart_dma_ops = {
16667+static const struct dma_map_ops gart_dma_ops = {
16668 .map_sg = gart_map_sg,
16669 .unmap_sg = gart_unmap_sg,
16670 .map_page = gart_map_page,
16671diff -urNp linux-2.6.32.48/arch/x86/kernel/pci-nommu.c linux-2.6.32.48/arch/x86/kernel/pci-nommu.c
16672--- linux-2.6.32.48/arch/x86/kernel/pci-nommu.c 2011-11-08 19:02:43.000000000 -0500
16673+++ linux-2.6.32.48/arch/x86/kernel/pci-nommu.c 2011-11-15 19:59:43.000000000 -0500
16674@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16675 flush_write_buffers();
16676 }
16677
16678-struct dma_map_ops nommu_dma_ops = {
16679+const struct dma_map_ops nommu_dma_ops = {
16680 .alloc_coherent = dma_generic_alloc_coherent,
16681 .free_coherent = nommu_free_coherent,
16682 .map_sg = nommu_map_sg,
16683diff -urNp linux-2.6.32.48/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.48/arch/x86/kernel/pci-swiotlb.c
16684--- linux-2.6.32.48/arch/x86/kernel/pci-swiotlb.c 2011-11-08 19:02:43.000000000 -0500
16685+++ linux-2.6.32.48/arch/x86/kernel/pci-swiotlb.c 2011-11-15 19:59:43.000000000 -0500
16686@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16687 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16688 }
16689
16690-static struct dma_map_ops swiotlb_dma_ops = {
16691+static const struct dma_map_ops swiotlb_dma_ops = {
16692 .mapping_error = swiotlb_dma_mapping_error,
16693 .alloc_coherent = x86_swiotlb_alloc_coherent,
16694 .free_coherent = swiotlb_free_coherent,
16695diff -urNp linux-2.6.32.48/arch/x86/kernel/process_32.c linux-2.6.32.48/arch/x86/kernel/process_32.c
16696--- linux-2.6.32.48/arch/x86/kernel/process_32.c 2011-11-08 19:02:43.000000000 -0500
16697+++ linux-2.6.32.48/arch/x86/kernel/process_32.c 2011-11-15 19:59:43.000000000 -0500
16698@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16699 unsigned long thread_saved_pc(struct task_struct *tsk)
16700 {
16701 return ((unsigned long *)tsk->thread.sp)[3];
16702+//XXX return tsk->thread.eip;
16703 }
16704
16705 #ifndef CONFIG_SMP
16706@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16707 unsigned short ss, gs;
16708 const char *board;
16709
16710- if (user_mode_vm(regs)) {
16711+ if (user_mode(regs)) {
16712 sp = regs->sp;
16713 ss = regs->ss & 0xffff;
16714- gs = get_user_gs(regs);
16715 } else {
16716 sp = (unsigned long) (&regs->sp);
16717 savesegment(ss, ss);
16718- savesegment(gs, gs);
16719 }
16720+ gs = get_user_gs(regs);
16721
16722 printk("\n");
16723
16724@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16725 regs.bx = (unsigned long) fn;
16726 regs.dx = (unsigned long) arg;
16727
16728- regs.ds = __USER_DS;
16729- regs.es = __USER_DS;
16730+ regs.ds = __KERNEL_DS;
16731+ regs.es = __KERNEL_DS;
16732 regs.fs = __KERNEL_PERCPU;
16733- regs.gs = __KERNEL_STACK_CANARY;
16734+ savesegment(gs, regs.gs);
16735 regs.orig_ax = -1;
16736 regs.ip = (unsigned long) kernel_thread_helper;
16737 regs.cs = __KERNEL_CS | get_kernel_rpl();
16738@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16739 struct task_struct *tsk;
16740 int err;
16741
16742- childregs = task_pt_regs(p);
16743+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16744 *childregs = *regs;
16745 childregs->ax = 0;
16746 childregs->sp = sp;
16747
16748 p->thread.sp = (unsigned long) childregs;
16749 p->thread.sp0 = (unsigned long) (childregs+1);
16750+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16751
16752 p->thread.ip = (unsigned long) ret_from_fork;
16753
16754@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16755 struct thread_struct *prev = &prev_p->thread,
16756 *next = &next_p->thread;
16757 int cpu = smp_processor_id();
16758- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16759+ struct tss_struct *tss = init_tss + cpu;
16760 bool preload_fpu;
16761
16762 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16763@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16764 */
16765 lazy_save_gs(prev->gs);
16766
16767+#ifdef CONFIG_PAX_MEMORY_UDEREF
16768+ __set_fs(task_thread_info(next_p)->addr_limit);
16769+#endif
16770+
16771 /*
16772 * Load the per-thread Thread-Local Storage descriptor.
16773 */
16774@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16775 */
16776 arch_end_context_switch(next_p);
16777
16778+ percpu_write(current_task, next_p);
16779+ percpu_write(current_tinfo, &next_p->tinfo);
16780+
16781 if (preload_fpu)
16782 __math_state_restore();
16783
16784@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16785 if (prev->gs | next->gs)
16786 lazy_load_gs(next->gs);
16787
16788- percpu_write(current_task, next_p);
16789-
16790 return prev_p;
16791 }
16792
16793@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16794 } while (count++ < 16);
16795 return 0;
16796 }
16797-
16798diff -urNp linux-2.6.32.48/arch/x86/kernel/process_64.c linux-2.6.32.48/arch/x86/kernel/process_64.c
16799--- linux-2.6.32.48/arch/x86/kernel/process_64.c 2011-11-08 19:02:43.000000000 -0500
16800+++ linux-2.6.32.48/arch/x86/kernel/process_64.c 2011-11-15 19:59:43.000000000 -0500
16801@@ -91,7 +91,7 @@ static void __exit_idle(void)
16802 void exit_idle(void)
16803 {
16804 /* idle loop has pid 0 */
16805- if (current->pid)
16806+ if (task_pid_nr(current))
16807 return;
16808 __exit_idle();
16809 }
16810@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16811 if (!board)
16812 board = "";
16813 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16814- current->pid, current->comm, print_tainted(),
16815+ task_pid_nr(current), current->comm, print_tainted(),
16816 init_utsname()->release,
16817 (int)strcspn(init_utsname()->version, " "),
16818 init_utsname()->version, board);
16819@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16820 struct pt_regs *childregs;
16821 struct task_struct *me = current;
16822
16823- childregs = ((struct pt_regs *)
16824- (THREAD_SIZE + task_stack_page(p))) - 1;
16825+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16826 *childregs = *regs;
16827
16828 childregs->ax = 0;
16829@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16830 p->thread.sp = (unsigned long) childregs;
16831 p->thread.sp0 = (unsigned long) (childregs+1);
16832 p->thread.usersp = me->thread.usersp;
16833+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16834
16835 set_tsk_thread_flag(p, TIF_FORK);
16836
16837@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16838 struct thread_struct *prev = &prev_p->thread;
16839 struct thread_struct *next = &next_p->thread;
16840 int cpu = smp_processor_id();
16841- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16842+ struct tss_struct *tss = init_tss + cpu;
16843 unsigned fsindex, gsindex;
16844 bool preload_fpu;
16845
16846@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16847 prev->usersp = percpu_read(old_rsp);
16848 percpu_write(old_rsp, next->usersp);
16849 percpu_write(current_task, next_p);
16850+ percpu_write(current_tinfo, &next_p->tinfo);
16851
16852- percpu_write(kernel_stack,
16853- (unsigned long)task_stack_page(next_p) +
16854- THREAD_SIZE - KERNEL_STACK_OFFSET);
16855+ percpu_write(kernel_stack, next->sp0);
16856
16857 /*
16858 * Now maybe reload the debug registers and handle I/O bitmaps
16859@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16860 if (!p || p == current || p->state == TASK_RUNNING)
16861 return 0;
16862 stack = (unsigned long)task_stack_page(p);
16863- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16864+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16865 return 0;
16866 fp = *(u64 *)(p->thread.sp);
16867 do {
16868- if (fp < (unsigned long)stack ||
16869- fp >= (unsigned long)stack+THREAD_SIZE)
16870+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16871 return 0;
16872 ip = *(u64 *)(fp+8);
16873 if (!in_sched_functions(ip))
16874diff -urNp linux-2.6.32.48/arch/x86/kernel/process.c linux-2.6.32.48/arch/x86/kernel/process.c
16875--- linux-2.6.32.48/arch/x86/kernel/process.c 2011-11-08 19:02:43.000000000 -0500
16876+++ linux-2.6.32.48/arch/x86/kernel/process.c 2011-11-15 19:59:43.000000000 -0500
16877@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16878
16879 void free_thread_info(struct thread_info *ti)
16880 {
16881- free_thread_xstate(ti->task);
16882 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16883 }
16884
16885+static struct kmem_cache *task_struct_cachep;
16886+
16887 void arch_task_cache_init(void)
16888 {
16889- task_xstate_cachep =
16890- kmem_cache_create("task_xstate", xstate_size,
16891+ /* create a slab on which task_structs can be allocated */
16892+ task_struct_cachep =
16893+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16894+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16895+
16896+ task_xstate_cachep =
16897+ kmem_cache_create("task_xstate", xstate_size,
16898 __alignof__(union thread_xstate),
16899- SLAB_PANIC | SLAB_NOTRACK, NULL);
16900+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16901+}
16902+
16903+struct task_struct *alloc_task_struct(void)
16904+{
16905+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16906+}
16907+
16908+void free_task_struct(struct task_struct *task)
16909+{
16910+ free_thread_xstate(task);
16911+ kmem_cache_free(task_struct_cachep, task);
16912 }
16913
16914 /*
16915@@ -73,7 +90,7 @@ void exit_thread(void)
16916 unsigned long *bp = t->io_bitmap_ptr;
16917
16918 if (bp) {
16919- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16920+ struct tss_struct *tss = init_tss + get_cpu();
16921
16922 t->io_bitmap_ptr = NULL;
16923 clear_thread_flag(TIF_IO_BITMAP);
16924@@ -93,6 +110,9 @@ void flush_thread(void)
16925
16926 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16927
16928+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16929+ loadsegment(gs, 0);
16930+#endif
16931 tsk->thread.debugreg0 = 0;
16932 tsk->thread.debugreg1 = 0;
16933 tsk->thread.debugreg2 = 0;
16934@@ -307,7 +327,7 @@ void default_idle(void)
16935 EXPORT_SYMBOL(default_idle);
16936 #endif
16937
16938-void stop_this_cpu(void *dummy)
16939+__noreturn void stop_this_cpu(void *dummy)
16940 {
16941 local_irq_disable();
16942 /*
16943@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
16944 }
16945 early_param("idle", idle_setup);
16946
16947-unsigned long arch_align_stack(unsigned long sp)
16948+#ifdef CONFIG_PAX_RANDKSTACK
16949+void pax_randomize_kstack(struct pt_regs *regs)
16950 {
16951- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16952- sp -= get_random_int() % 8192;
16953- return sp & ~0xf;
16954-}
16955+ struct thread_struct *thread = &current->thread;
16956+ unsigned long time;
16957
16958-unsigned long arch_randomize_brk(struct mm_struct *mm)
16959-{
16960- unsigned long range_end = mm->brk + 0x02000000;
16961- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16962+ if (!randomize_va_space)
16963+ return;
16964+
16965+ if (v8086_mode(regs))
16966+ return;
16967+
16968+ rdtscl(time);
16969+
16970+ /* P4 seems to return a 0 LSB, ignore it */
16971+#ifdef CONFIG_MPENTIUM4
16972+ time &= 0x3EUL;
16973+ time <<= 2;
16974+#elif defined(CONFIG_X86_64)
16975+ time &= 0xFUL;
16976+ time <<= 4;
16977+#else
16978+ time &= 0x1FUL;
16979+ time <<= 3;
16980+#endif
16981+
16982+ thread->sp0 ^= time;
16983+ load_sp0(init_tss + smp_processor_id(), thread);
16984+
16985+#ifdef CONFIG_X86_64
16986+ percpu_write(kernel_stack, thread->sp0);
16987+#endif
16988 }
16989+#endif
16990
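
Note: arch_align_stack()/arch_randomize_brk() are dropped in favour of pax_randomize_kstack(), which XORs a handful of TSC-derived bits into thread->sp0 (masked and scaled per CPU family: 0x1F<<3 on generic 32-bit, 0xF<<4 on 64-bit, 0x3E<<2 on Pentium 4, whose TSC LSB reads as 0) and then reloads the TSS and, on 64-bit, the per-CPU kernel_stack. A quick userspace look at the resulting jitter, assuming an x86 host with __rdtsc() (illustrative only):

    #include <stdio.h>
    #include <x86intrin.h>

    /* Apply the same mask/shift the hunk uses for the generic 32-bit case:
     * 5 low TSC bits, scaled by 8, giving a 0..248 byte nudge of sp0. */
    static unsigned long kstack_delta(void)
    {
        unsigned long t = (unsigned long)__rdtsc();
        t &= 0x1FUL;
        t <<= 3;
        return t;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("sp0 ^= %#lx\n", kstack_delta());
        return 0;
    }
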
16991diff -urNp linux-2.6.32.48/arch/x86/kernel/ptrace.c linux-2.6.32.48/arch/x86/kernel/ptrace.c
16992--- linux-2.6.32.48/arch/x86/kernel/ptrace.c 2011-11-08 19:02:43.000000000 -0500
16993+++ linux-2.6.32.48/arch/x86/kernel/ptrace.c 2011-11-15 19:59:43.000000000 -0500
16994@@ -925,7 +925,7 @@ static const struct user_regset_view use
16995 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16996 {
16997 int ret;
16998- unsigned long __user *datap = (unsigned long __user *)data;
16999+ unsigned long __user *datap = (__force unsigned long __user *)data;
17000
17001 switch (request) {
17002 /* read the word at location addr in the USER area. */
17003@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
17004 if (addr < 0)
17005 return -EIO;
17006 ret = do_get_thread_area(child, addr,
17007- (struct user_desc __user *) data);
17008+ (__force struct user_desc __user *) data);
17009 break;
17010
17011 case PTRACE_SET_THREAD_AREA:
17012 if (addr < 0)
17013 return -EIO;
17014 ret = do_set_thread_area(child, addr,
17015- (struct user_desc __user *) data, 0);
17016+ (__force struct user_desc __user *) data, 0);
17017 break;
17018 #endif
17019
17020@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
17021 #ifdef CONFIG_X86_PTRACE_BTS
17022 case PTRACE_BTS_CONFIG:
17023 ret = ptrace_bts_config
17024- (child, data, (struct ptrace_bts_config __user *)addr);
17025+ (child, data, (__force struct ptrace_bts_config __user *)addr);
17026 break;
17027
17028 case PTRACE_BTS_STATUS:
17029 ret = ptrace_bts_status
17030- (child, data, (struct ptrace_bts_config __user *)addr);
17031+ (child, data, (__force struct ptrace_bts_config __user *)addr);
17032 break;
17033
17034 case PTRACE_BTS_SIZE:
17035@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
17036
17037 case PTRACE_BTS_GET:
17038 ret = ptrace_bts_read_record
17039- (child, data, (struct bts_struct __user *) addr);
17040+ (child, data, (__force struct bts_struct __user *) addr);
17041 break;
17042
17043 case PTRACE_BTS_CLEAR:
17044@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
17045
17046 case PTRACE_BTS_DRAIN:
17047 ret = ptrace_bts_drain
17048- (child, data, (struct bts_struct __user *) addr);
17049+ (child, data, (__force struct bts_struct __user *) addr);
17050 break;
17051 #endif /* CONFIG_X86_PTRACE_BTS */
17052
17053@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
17054 info.si_code = si_code;
17055
17056 /* User-mode ip? */
17057- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
17058+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
17059
17060 /* Send us the fake SIGTRAP */
17061 force_sig_info(SIGTRAP, &info, tsk);
17062@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
17063 * We must return the syscall number to actually look up in the table.
17064 * This can be -1L to skip running any syscall at all.
17065 */
17066-asmregparm long syscall_trace_enter(struct pt_regs *regs)
17067+long syscall_trace_enter(struct pt_regs *regs)
17068 {
17069 long ret = 0;
17070
17071@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
17072 return ret ?: regs->orig_ax;
17073 }
17074
17075-asmregparm void syscall_trace_leave(struct pt_regs *regs)
17076+void syscall_trace_leave(struct pt_regs *regs)
17077 {
17078 if (unlikely(current->audit_context))
17079 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
17080diff -urNp linux-2.6.32.48/arch/x86/kernel/reboot.c linux-2.6.32.48/arch/x86/kernel/reboot.c
17081--- linux-2.6.32.48/arch/x86/kernel/reboot.c 2011-11-08 19:02:43.000000000 -0500
17082+++ linux-2.6.32.48/arch/x86/kernel/reboot.c 2011-11-15 19:59:43.000000000 -0500
17083@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
17084 EXPORT_SYMBOL(pm_power_off);
17085
17086 static const struct desc_ptr no_idt = {};
17087-static int reboot_mode;
17088+static unsigned short reboot_mode;
17089 enum reboot_type reboot_type = BOOT_KBD;
17090 int reboot_force;
17091
17092@@ -292,12 +292,12 @@ core_initcall(reboot_init);
17093 controller to pulse the CPU reset line, which is more thorough, but
17094 doesn't work with at least one type of 486 motherboard. It is easy
17095 to stop this code working; hence the copious comments. */
17096-static const unsigned long long
17097-real_mode_gdt_entries [3] =
17098+static struct desc_struct
17099+real_mode_gdt_entries [3] __read_only =
17100 {
17101- 0x0000000000000000ULL, /* Null descriptor */
17102- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
17103- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
17104+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
17105+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
17106+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
17107 };
17108
17109 static const struct desc_ptr
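
Note: the raw 64-bit descriptor constants become struct desc_struct entries built with GDT_ENTRY_INIT() and marked __read_only. Packing the same access byte, base and limit into the x86 descriptor layout reproduces the original constants, which the short check below confirms (pack_descriptor() here is a local stand-in, not the kernel macro):

    #include <stdio.h>
    #include <stdint.h>

    /* Pack base/limit/access/flags into the 8-byte x86 segment descriptor layout. */
    static uint64_t pack_descriptor(uint32_t base, uint32_t limit, uint8_t access,
                                    uint8_t flags)
    {
        uint64_t d = 0;
        d |= (uint64_t)(limit & 0xffff);               /* limit 15..0      */
        d |= (uint64_t)(base  & 0xffffff) << 16;       /* base  23..0      */
        d |= (uint64_t)access             << 40;       /* type/access byte */
        d |= (uint64_t)((limit >> 16) & 0xf) << 48;    /* limit 19..16     */
        d |= (uint64_t)(flags & 0xf)         << 52;    /* granularity etc. */
        d |= (uint64_t)((base >> 24) & 0xff) << 56;    /* base  31..24     */
        return d;
    }

    int main(void)
    {
        /* The two real-mode descriptors the hunk rewrites symbolically. */
        printf("%#018llx\n", (unsigned long long)pack_descriptor(0,     0xffff, 0x9b, 0));
        printf("%#018llx\n", (unsigned long long)pack_descriptor(0x100, 0xffff, 0x93, 0));
        /* prints 0x00009b000000ffff and 0x000093000100ffff */
        return 0;
    }
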
17110@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
17111 * specified by the code and length parameters.
17112  * We assume that length will always be less than 100!
17113 */
17114-void machine_real_restart(const unsigned char *code, int length)
17115+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
17116 {
17117 local_irq_disable();
17118
17119@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
17120 /* Remap the kernel at virtual address zero, as well as offset zero
17121 from the kernel segment. This assumes the kernel segment starts at
17122 virtual address PAGE_OFFSET. */
17123- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17124- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
17125+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17126+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
17127
17128 /*
17129 * Use `swapper_pg_dir' as our page directory.
17130@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
17131 boot)". This seems like a fairly standard thing that gets set by
17132 REBOOT.COM programs, and the previous reset routine did this
17133 too. */
17134- *((unsigned short *)0x472) = reboot_mode;
17135+ *(unsigned short *)(__va(0x472)) = reboot_mode;
17136
17137 /* For the switch to real mode, copy some code to low memory. It has
17138 to be in the first 64k because it is running in 16-bit mode, and it
17139 has to have the same physical and virtual address, because it turns
17140 off paging. Copy it near the end of the first page, out of the way
17141 of BIOS variables. */
17142- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
17143- real_mode_switch, sizeof (real_mode_switch));
17144- memcpy((void *)(0x1000 - 100), code, length);
17145+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
17146+ memcpy(__va(0x1000 - 100), code, length);
17147
17148 /* Set up the IDT for real mode. */
17149 load_idt(&real_mode_idt);
17150@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
17151 __asm__ __volatile__ ("ljmp $0x0008,%0"
17152 :
17153 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
17154+ do { } while (1);
17155 }
17156 #ifdef CONFIG_APM_MODULE
17157 EXPORT_SYMBOL(machine_real_restart);
17158@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
17159 {
17160 }
17161
17162-static void native_machine_emergency_restart(void)
17163+__noreturn static void native_machine_emergency_restart(void)
17164 {
17165 int i;
17166
17167@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
17168 #endif
17169 }
17170
17171-static void __machine_emergency_restart(int emergency)
17172+static __noreturn void __machine_emergency_restart(int emergency)
17173 {
17174 reboot_emergency = emergency;
17175 machine_ops.emergency_restart();
17176 }
17177
17178-static void native_machine_restart(char *__unused)
17179+static __noreturn void native_machine_restart(char *__unused)
17180 {
17181 printk("machine restart\n");
17182
17183@@ -674,7 +674,7 @@ static void native_machine_restart(char
17184 __machine_emergency_restart(0);
17185 }
17186
17187-static void native_machine_halt(void)
17188+static __noreturn void native_machine_halt(void)
17189 {
17190 /* stop other cpus and apics */
17191 machine_shutdown();
17192@@ -685,7 +685,7 @@ static void native_machine_halt(void)
17193 stop_this_cpu(NULL);
17194 }
17195
17196-static void native_machine_power_off(void)
17197+__noreturn static void native_machine_power_off(void)
17198 {
17199 if (pm_power_off) {
17200 if (!reboot_force)
17201@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
17202 }
17203 /* a fallback in case there is no PM info available */
17204 tboot_shutdown(TB_SHUTDOWN_HALT);
17205+ do { } while (1);
17206 }
17207
17208 struct machine_ops machine_ops = {
17209diff -urNp linux-2.6.32.48/arch/x86/kernel/setup.c linux-2.6.32.48/arch/x86/kernel/setup.c
17210--- linux-2.6.32.48/arch/x86/kernel/setup.c 2011-11-08 19:02:43.000000000 -0500
17211+++ linux-2.6.32.48/arch/x86/kernel/setup.c 2011-11-15 19:59:43.000000000 -0500
17212@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
17213
17214 if (!boot_params.hdr.root_flags)
17215 root_mountflags &= ~MS_RDONLY;
17216- init_mm.start_code = (unsigned long) _text;
17217- init_mm.end_code = (unsigned long) _etext;
17218+ init_mm.start_code = ktla_ktva((unsigned long) _text);
17219+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
17220 init_mm.end_data = (unsigned long) _edata;
17221 init_mm.brk = _brk_end;
17222
17223- code_resource.start = virt_to_phys(_text);
17224- code_resource.end = virt_to_phys(_etext)-1;
17225- data_resource.start = virt_to_phys(_etext);
17226+ code_resource.start = virt_to_phys(ktla_ktva(_text));
17227+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17228+ data_resource.start = virt_to_phys(_sdata);
17229 data_resource.end = virt_to_phys(_edata)-1;
17230 bss_resource.start = virt_to_phys(&__bss_start);
17231 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17232diff -urNp linux-2.6.32.48/arch/x86/kernel/setup_percpu.c linux-2.6.32.48/arch/x86/kernel/setup_percpu.c
17233--- linux-2.6.32.48/arch/x86/kernel/setup_percpu.c 2011-11-08 19:02:43.000000000 -0500
17234+++ linux-2.6.32.48/arch/x86/kernel/setup_percpu.c 2011-11-15 19:59:43.000000000 -0500
17235@@ -25,19 +25,17 @@
17236 # define DBG(x...)
17237 #endif
17238
17239-DEFINE_PER_CPU(int, cpu_number);
17240+#ifdef CONFIG_SMP
17241+DEFINE_PER_CPU(unsigned int, cpu_number);
17242 EXPORT_PER_CPU_SYMBOL(cpu_number);
17243+#endif
17244
17245-#ifdef CONFIG_X86_64
17246 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17247-#else
17248-#define BOOT_PERCPU_OFFSET 0
17249-#endif
17250
17251 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17252 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17253
17254-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17255+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17256 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17257 };
17258 EXPORT_SYMBOL(__per_cpu_offset);
17259@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
17260 {
17261 #ifdef CONFIG_X86_32
17262 struct desc_struct gdt;
17263+ unsigned long base = per_cpu_offset(cpu);
17264
17265- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17266- 0x2 | DESCTYPE_S, 0x8);
17267- gdt.s = 1;
17268+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17269+ 0x83 | DESCTYPE_S, 0xC);
17270 write_gdt_entry(get_cpu_gdt_table(cpu),
17271 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17272 #endif
17273@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
17274 /* alrighty, percpu areas up and running */
17275 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17276 for_each_possible_cpu(cpu) {
17277+#ifdef CONFIG_CC_STACKPROTECTOR
17278+#ifdef CONFIG_X86_32
17279+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
17280+#endif
17281+#endif
17282 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17283 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17284 per_cpu(cpu_number, cpu) = cpu;
17285@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
17286 early_per_cpu_map(x86_cpu_to_node_map, cpu);
17287 #endif
17288 #endif
17289+#ifdef CONFIG_CC_STACKPROTECTOR
17290+#ifdef CONFIG_X86_32
17291+ if (!cpu)
17292+ per_cpu(stack_canary.canary, cpu) = canary;
17293+#endif
17294+#endif
17295 /*
17296 * Up to this point, the boot CPU has been using .data.init
17297 * area. Reload any changed state for the boot CPU.
17298diff -urNp linux-2.6.32.48/arch/x86/kernel/signal.c linux-2.6.32.48/arch/x86/kernel/signal.c
17299--- linux-2.6.32.48/arch/x86/kernel/signal.c 2011-11-08 19:02:43.000000000 -0500
17300+++ linux-2.6.32.48/arch/x86/kernel/signal.c 2011-11-15 19:59:43.000000000 -0500
17301@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
17302 * Align the stack pointer according to the i386 ABI,
17303 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17304 */
17305- sp = ((sp + 4) & -16ul) - 4;
17306+ sp = ((sp - 12) & -16ul) - 4;
17307 #else /* !CONFIG_X86_32 */
17308 sp = round_down(sp, 16) - 8;
17309 #endif
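
Note: both forms keep the i386 ABI requirement that (sp + 4) be 16-byte aligned on handler entry; working the arithmetic through, the old ((sp + 4) & -16) - 4 can land anywhere from 0 to 15 bytes below the incoming sp, while the new ((sp - 12) & -16) - 4 always leaves 16 to 31 bytes of headroom below it. A small check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long base = 0x1000;
        for (unsigned long r = 0; r < 16; r++) {
            unsigned long sp     = base + r;
            unsigned long old_sp = ((sp + 4)  & -16UL) - 4;   /* pre-patch  */
            unsigned long new_sp = ((sp - 12) & -16UL) - 4;   /* post-patch */
            printf("sp%%16=%2lu  old gap=%2lu  new gap=%2lu  aligned=%d/%d\n",
                   r, sp - old_sp, sp - new_sp,
                   (old_sp + 4) % 16 == 0, (new_sp + 4) % 16 == 0);
        }
        return 0;
    }
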
17310@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
17311 * Return an always-bogus address instead so we will die with SIGSEGV.
17312 */
17313 if (onsigstack && !likely(on_sig_stack(sp)))
17314- return (void __user *)-1L;
17315+ return (__force void __user *)-1L;
17316
17317 /* save i387 state */
17318 if (used_math() && save_i387_xstate(*fpstate) < 0)
17319- return (void __user *)-1L;
17320+ return (__force void __user *)-1L;
17321
17322 return (void __user *)sp;
17323 }
17324@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
17325 }
17326
17327 if (current->mm->context.vdso)
17328- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17329+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17330 else
17331- restorer = &frame->retcode;
17332+ restorer = (void __user *)&frame->retcode;
17333 if (ka->sa.sa_flags & SA_RESTORER)
17334 restorer = ka->sa.sa_restorer;
17335
17336@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
17337 * reasons and because gdb uses it as a signature to notice
17338 * signal handler stack frames.
17339 */
17340- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17341+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17342
17343 if (err)
17344 return -EFAULT;
17345@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
17346 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17347
17348 /* Set up to return from userspace. */
17349- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17350+ if (current->mm->context.vdso)
17351+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17352+ else
17353+ restorer = (void __user *)&frame->retcode;
17354 if (ka->sa.sa_flags & SA_RESTORER)
17355 restorer = ka->sa.sa_restorer;
17356 put_user_ex(restorer, &frame->pretcode);
17357@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
17358 * reasons and because gdb uses it as a signature to notice
17359 * signal handler stack frames.
17360 */
17361- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17362+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17363 } put_user_catch(err);
17364
17365 if (err)
17366@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
17367 int signr;
17368 sigset_t *oldset;
17369
17370+ pax_track_stack();
17371+
17372 /*
17373 * We want the common case to go fast, which is why we may in certain
17374 * cases get here from kernel mode. Just return without doing anything
17375@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
17376 * X86_32: vm86 regs switched out by assembly code before reaching
17377 * here, so testing against kernel CS suffices.
17378 */
17379- if (!user_mode(regs))
17380+ if (!user_mode_novm(regs))
17381 return;
17382
17383 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
17384diff -urNp linux-2.6.32.48/arch/x86/kernel/smpboot.c linux-2.6.32.48/arch/x86/kernel/smpboot.c
17385--- linux-2.6.32.48/arch/x86/kernel/smpboot.c 2011-11-08 19:02:43.000000000 -0500
17386+++ linux-2.6.32.48/arch/x86/kernel/smpboot.c 2011-11-15 19:59:43.000000000 -0500
17387@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
17388 */
17389 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
17390
17391-void cpu_hotplug_driver_lock()
17392+void cpu_hotplug_driver_lock(void)
17393 {
17394- mutex_lock(&x86_cpu_hotplug_driver_mutex);
17395+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
17396 }
17397
17398-void cpu_hotplug_driver_unlock()
17399+void cpu_hotplug_driver_unlock(void)
17400 {
17401- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
17402+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
17403 }
17404
17405 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
17406@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
17407 * target processor state.
17408 */
17409 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
17410- (unsigned long)stack_start.sp);
17411+ stack_start);
17412
17413 /*
17414 * Run STARTUP IPI loop.
17415@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
17416 set_idle_for_cpu(cpu, c_idle.idle);
17417 do_rest:
17418 per_cpu(current_task, cpu) = c_idle.idle;
17419+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17420 #ifdef CONFIG_X86_32
17421 /* Stack for startup_32 can be just as for start_secondary onwards */
17422 irq_ctx_init(cpu);
17423@@ -750,13 +751,15 @@ do_rest:
17424 #else
17425 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17426 initial_gs = per_cpu_offset(cpu);
17427- per_cpu(kernel_stack, cpu) =
17428- (unsigned long)task_stack_page(c_idle.idle) -
17429- KERNEL_STACK_OFFSET + THREAD_SIZE;
17430+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17431 #endif
17432+
17433+ pax_open_kernel();
17434 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17435+ pax_close_kernel();
17436+
17437 initial_code = (unsigned long)start_secondary;
17438- stack_start.sp = (void *) c_idle.idle->thread.sp;
17439+ stack_start = c_idle.idle->thread.sp;
17440
17441 /* start_ip had better be page-aligned! */
17442 start_ip = setup_trampoline();
17443@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
17444
17445 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17446
17447+#ifdef CONFIG_PAX_PER_CPU_PGD
17448+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17449+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17450+ KERNEL_PGD_PTRS);
17451+#endif
17452+
17453 err = do_boot_cpu(apicid, cpu);
17454
17455 if (err) {
17456diff -urNp linux-2.6.32.48/arch/x86/kernel/step.c linux-2.6.32.48/arch/x86/kernel/step.c
17457--- linux-2.6.32.48/arch/x86/kernel/step.c 2011-11-08 19:02:43.000000000 -0500
17458+++ linux-2.6.32.48/arch/x86/kernel/step.c 2011-11-15 19:59:43.000000000 -0500
17459@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
17460 struct desc_struct *desc;
17461 unsigned long base;
17462
17463- seg &= ~7UL;
17464+ seg >>= 3;
17465
17466 mutex_lock(&child->mm->context.lock);
17467- if (unlikely((seg >> 3) >= child->mm->context.size))
17468+ if (unlikely(seg >= child->mm->context.size))
17469 addr = -1L; /* bogus selector, access would fault */
17470 else {
17471 desc = child->mm->context.ldt + seg;
17472@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
17473 addr += base;
17474 }
17475 mutex_unlock(&child->mm->context.lock);
17476- }
17477+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17478+ addr = ktla_ktva(addr);
17479
17480 return addr;
17481 }
17482@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
17483 unsigned char opcode[15];
17484 unsigned long addr = convert_ip_to_linear(child, regs);
17485
17486+ if (addr == -EINVAL)
17487+ return 0;
17488+
17489 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17490 for (i = 0; i < copied; i++) {
17491 switch (opcode[i]) {
17492@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
17493
17494 #ifdef CONFIG_X86_64
17495 case 0x40 ... 0x4f:
17496- if (regs->cs != __USER_CS)
17497+ if ((regs->cs & 0xffff) != __USER_CS)
17498 /* 32-bit mode: register increment */
17499 return 0;
17500 /* 64-bit mode: REX prefix */
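
Note: convert_ip_to_linear() now derives the LDT index by shifting the selector right by 3 (bits 1:0 are the RPL, bit 2 the table indicator, bits 15:3 the descriptor index) and translates kernel code selectors, including the KERNEXEC alias __KERNEXEC_KERNEL_CS, through ktla_ktva(). A small decoder showing the field split (plain userspace C):

    #include <stdio.h>
    #include <stdint.h>

    /* Split an x86 segment selector into its three fields. */
    struct selector {
        unsigned rpl;      /* requested privilege level, bits 1:0 */
        unsigned ti;       /* table indicator: 0 = GDT, 1 = LDT, bit 2 */
        unsigned index;    /* descriptor index, bits 15:3 */
    };

    static struct selector decode_selector(uint16_t sel)
    {
        struct selector s = {
            .rpl   = sel & 0x3,
            .ti    = (sel >> 2) & 0x1,
            .index = sel >> 3,          /* the 'seg >>= 3' in the hunk */
        };
        return s;
    }

    int main(void)
    {
        /* 0x73 is the usual 32-bit Linux user code selector: index 14, GDT, RPL 3 */
        struct selector s = decode_selector(0x73);
        printf("index=%u ti=%u rpl=%u\n", s.index, s.ti, s.rpl);
        return 0;
    }
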
17501diff -urNp linux-2.6.32.48/arch/x86/kernel/syscall_table_32.S linux-2.6.32.48/arch/x86/kernel/syscall_table_32.S
17502--- linux-2.6.32.48/arch/x86/kernel/syscall_table_32.S 2011-11-08 19:02:43.000000000 -0500
17503+++ linux-2.6.32.48/arch/x86/kernel/syscall_table_32.S 2011-11-15 19:59:43.000000000 -0500
17504@@ -1,3 +1,4 @@
17505+.section .rodata,"a",@progbits
17506 ENTRY(sys_call_table)
17507 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17508 .long sys_exit
17509diff -urNp linux-2.6.32.48/arch/x86/kernel/sys_i386_32.c linux-2.6.32.48/arch/x86/kernel/sys_i386_32.c
17510--- linux-2.6.32.48/arch/x86/kernel/sys_i386_32.c 2011-11-08 19:02:43.000000000 -0500
17511+++ linux-2.6.32.48/arch/x86/kernel/sys_i386_32.c 2011-11-15 19:59:43.000000000 -0500
17512@@ -24,6 +24,21 @@
17513
17514 #include <asm/syscalls.h>
17515
17516+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17517+{
17518+ unsigned long pax_task_size = TASK_SIZE;
17519+
17520+#ifdef CONFIG_PAX_SEGMEXEC
17521+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17522+ pax_task_size = SEGMEXEC_TASK_SIZE;
17523+#endif
17524+
17525+ if (len > pax_task_size || addr > pax_task_size - len)
17526+ return -EINVAL;
17527+
17528+ return 0;
17529+}
17530+
17531 /*
17532 * Perform the select(nd, in, out, ex, tv) and mmap() system
17533 * calls. Linux/i386 didn't use to be able to handle more than
17534@@ -58,6 +73,212 @@ out:
17535 return err;
17536 }
17537
17538+unsigned long
17539+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17540+ unsigned long len, unsigned long pgoff, unsigned long flags)
17541+{
17542+ struct mm_struct *mm = current->mm;
17543+ struct vm_area_struct *vma;
17544+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17545+
17546+#ifdef CONFIG_PAX_SEGMEXEC
17547+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17548+ pax_task_size = SEGMEXEC_TASK_SIZE;
17549+#endif
17550+
17551+ pax_task_size -= PAGE_SIZE;
17552+
17553+ if (len > pax_task_size)
17554+ return -ENOMEM;
17555+
17556+ if (flags & MAP_FIXED)
17557+ return addr;
17558+
17559+#ifdef CONFIG_PAX_RANDMMAP
17560+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17561+#endif
17562+
17563+ if (addr) {
17564+ addr = PAGE_ALIGN(addr);
17565+ if (pax_task_size - len >= addr) {
17566+ vma = find_vma(mm, addr);
17567+ if (check_heap_stack_gap(vma, addr, len))
17568+ return addr;
17569+ }
17570+ }
17571+ if (len > mm->cached_hole_size) {
17572+ start_addr = addr = mm->free_area_cache;
17573+ } else {
17574+ start_addr = addr = mm->mmap_base;
17575+ mm->cached_hole_size = 0;
17576+ }
17577+
17578+#ifdef CONFIG_PAX_PAGEEXEC
17579+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17580+ start_addr = 0x00110000UL;
17581+
17582+#ifdef CONFIG_PAX_RANDMMAP
17583+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17584+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17585+#endif
17586+
17587+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17588+ start_addr = addr = mm->mmap_base;
17589+ else
17590+ addr = start_addr;
17591+ }
17592+#endif
17593+
17594+full_search:
17595+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17596+ /* At this point: (!vma || addr < vma->vm_end). */
17597+ if (pax_task_size - len < addr) {
17598+ /*
17599+ * Start a new search - just in case we missed
17600+ * some holes.
17601+ */
17602+ if (start_addr != mm->mmap_base) {
17603+ start_addr = addr = mm->mmap_base;
17604+ mm->cached_hole_size = 0;
17605+ goto full_search;
17606+ }
17607+ return -ENOMEM;
17608+ }
17609+ if (check_heap_stack_gap(vma, addr, len))
17610+ break;
17611+ if (addr + mm->cached_hole_size < vma->vm_start)
17612+ mm->cached_hole_size = vma->vm_start - addr;
17613+ addr = vma->vm_end;
17614+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17615+ start_addr = addr = mm->mmap_base;
17616+ mm->cached_hole_size = 0;
17617+ goto full_search;
17618+ }
17619+ }
17620+
17621+ /*
17622+ * Remember the place where we stopped the search:
17623+ */
17624+ mm->free_area_cache = addr + len;
17625+ return addr;
17626+}
17627+
17628+unsigned long
17629+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17630+ const unsigned long len, const unsigned long pgoff,
17631+ const unsigned long flags)
17632+{
17633+ struct vm_area_struct *vma;
17634+ struct mm_struct *mm = current->mm;
17635+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17636+
17637+#ifdef CONFIG_PAX_SEGMEXEC
17638+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17639+ pax_task_size = SEGMEXEC_TASK_SIZE;
17640+#endif
17641+
17642+ pax_task_size -= PAGE_SIZE;
17643+
17644+ /* requested length too big for entire address space */
17645+ if (len > pax_task_size)
17646+ return -ENOMEM;
17647+
17648+ if (flags & MAP_FIXED)
17649+ return addr;
17650+
17651+#ifdef CONFIG_PAX_PAGEEXEC
17652+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17653+ goto bottomup;
17654+#endif
17655+
17656+#ifdef CONFIG_PAX_RANDMMAP
17657+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17658+#endif
17659+
17660+ /* requesting a specific address */
17661+ if (addr) {
17662+ addr = PAGE_ALIGN(addr);
17663+ if (pax_task_size - len >= addr) {
17664+ vma = find_vma(mm, addr);
17665+ if (check_heap_stack_gap(vma, addr, len))
17666+ return addr;
17667+ }
17668+ }
17669+
17670+ /* check if free_area_cache is useful for us */
17671+ if (len <= mm->cached_hole_size) {
17672+ mm->cached_hole_size = 0;
17673+ mm->free_area_cache = mm->mmap_base;
17674+ }
17675+
17676+ /* either no address requested or can't fit in requested address hole */
17677+ addr = mm->free_area_cache;
17678+
17679+ /* make sure it can fit in the remaining address space */
17680+ if (addr > len) {
17681+ vma = find_vma(mm, addr-len);
17682+ if (check_heap_stack_gap(vma, addr - len, len))
17683+ /* remember the address as a hint for next time */
17684+ return (mm->free_area_cache = addr-len);
17685+ }
17686+
17687+ if (mm->mmap_base < len)
17688+ goto bottomup;
17689+
17690+ addr = mm->mmap_base-len;
17691+
17692+ do {
17693+ /*
17694+ * Lookup failure means no vma is above this address,
17695+ * else if new region fits below vma->vm_start,
17696+ * return with success:
17697+ */
17698+ vma = find_vma(mm, addr);
17699+ if (check_heap_stack_gap(vma, addr, len))
17700+ /* remember the address as a hint for next time */
17701+ return (mm->free_area_cache = addr);
17702+
17703+ /* remember the largest hole we saw so far */
17704+ if (addr + mm->cached_hole_size < vma->vm_start)
17705+ mm->cached_hole_size = vma->vm_start - addr;
17706+
17707+ /* try just below the current vma->vm_start */
17708+ addr = skip_heap_stack_gap(vma, len);
17709+ } while (!IS_ERR_VALUE(addr));
17710+
17711+bottomup:
17712+ /*
17713+ * A failed mmap() very likely causes application failure,
17714+ * so fall back to the bottom-up function here. This scenario
17715+ * can happen with large stack limits and large mmap()
17716+ * allocations.
17717+ */
17718+
17719+#ifdef CONFIG_PAX_SEGMEXEC
17720+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17721+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17722+ else
17723+#endif
17724+
17725+ mm->mmap_base = TASK_UNMAPPED_BASE;
17726+
17727+#ifdef CONFIG_PAX_RANDMMAP
17728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17729+ mm->mmap_base += mm->delta_mmap;
17730+#endif
17731+
17732+ mm->free_area_cache = mm->mmap_base;
17733+ mm->cached_hole_size = ~0UL;
17734+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17735+ /*
17736+ * Restore the topdown base:
17737+ */
17738+ mm->mmap_base = base;
17739+ mm->free_area_cache = base;
17740+ mm->cached_hole_size = ~0UL;
17741+
17742+ return addr;
17743+}
17744
17745 struct sel_arg_struct {
17746 unsigned long n;
17747@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17748 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17749 case SEMTIMEDOP:
17750 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17751- (const struct timespec __user *)fifth);
17752+ (__force const struct timespec __user *)fifth);
17753
17754 case SEMGET:
17755 return sys_semget(first, second, third);
17756@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17757 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17758 if (ret)
17759 return ret;
17760- return put_user(raddr, (ulong __user *) third);
17761+ return put_user(raddr, (__force ulong __user *) third);
17762 }
17763 case 1: /* iBCS2 emulator entry point */
17764 if (!segment_eq(get_fs(), get_ds()))
17765@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17766
17767 return error;
17768 }
17769-
17770-
17771-/*
17772- * Do a system call from kernel instead of calling sys_execve so we
17773- * end up with proper pt_regs.
17774- */
17775-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17776-{
17777- long __res;
17778- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17779- : "=a" (__res)
17780- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17781- return __res;
17782-}
17783diff -urNp linux-2.6.32.48/arch/x86/kernel/sys_x86_64.c linux-2.6.32.48/arch/x86/kernel/sys_x86_64.c
17784--- linux-2.6.32.48/arch/x86/kernel/sys_x86_64.c 2011-11-08 19:02:43.000000000 -0500
17785+++ linux-2.6.32.48/arch/x86/kernel/sys_x86_64.c 2011-11-15 19:59:43.000000000 -0500
17786@@ -32,8 +32,8 @@ out:
17787 return error;
17788 }
17789
17790-static void find_start_end(unsigned long flags, unsigned long *begin,
17791- unsigned long *end)
17792+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17793+ unsigned long *begin, unsigned long *end)
17794 {
17795 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17796 unsigned long new_begin;
17797@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17798 *begin = new_begin;
17799 }
17800 } else {
17801- *begin = TASK_UNMAPPED_BASE;
17802+ *begin = mm->mmap_base;
17803 *end = TASK_SIZE;
17804 }
17805 }
17806@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17807 if (flags & MAP_FIXED)
17808 return addr;
17809
17810- find_start_end(flags, &begin, &end);
17811+ find_start_end(mm, flags, &begin, &end);
17812
17813 if (len > end)
17814 return -ENOMEM;
17815
17816+#ifdef CONFIG_PAX_RANDMMAP
17817+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17818+#endif
17819+
17820 if (addr) {
17821 addr = PAGE_ALIGN(addr);
17822 vma = find_vma(mm, addr);
17823- if (end - len >= addr &&
17824- (!vma || addr + len <= vma->vm_start))
17825+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17826 return addr;
17827 }
17828 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17829@@ -106,7 +109,7 @@ full_search:
17830 }
17831 return -ENOMEM;
17832 }
17833- if (!vma || addr + len <= vma->vm_start) {
17834+ if (check_heap_stack_gap(vma, addr, len)) {
17835 /*
17836 * Remember the place where we stopped the search:
17837 */
17838@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17839 {
17840 struct vm_area_struct *vma;
17841 struct mm_struct *mm = current->mm;
17842- unsigned long addr = addr0;
17843+ unsigned long base = mm->mmap_base, addr = addr0;
17844
17845 /* requested length too big for entire address space */
17846 if (len > TASK_SIZE)
17847@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17848 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17849 goto bottomup;
17850
17851+#ifdef CONFIG_PAX_RANDMMAP
17852+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17853+#endif
17854+
17855 /* requesting a specific address */
17856 if (addr) {
17857 addr = PAGE_ALIGN(addr);
17858- vma = find_vma(mm, addr);
17859- if (TASK_SIZE - len >= addr &&
17860- (!vma || addr + len <= vma->vm_start))
17861- return addr;
17862+ if (TASK_SIZE - len >= addr) {
17863+ vma = find_vma(mm, addr);
17864+ if (check_heap_stack_gap(vma, addr, len))
17865+ return addr;
17866+ }
17867 }
17868
17869 /* check if free_area_cache is useful for us */
17870@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17871 /* make sure it can fit in the remaining address space */
17872 if (addr > len) {
17873 vma = find_vma(mm, addr-len);
17874- if (!vma || addr <= vma->vm_start)
17875+ if (check_heap_stack_gap(vma, addr - len, len))
17876 /* remember the address as a hint for next time */
17877 return mm->free_area_cache = addr-len;
17878 }
17879@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17880 * return with success:
17881 */
17882 vma = find_vma(mm, addr);
17883- if (!vma || addr+len <= vma->vm_start)
17884+ if (check_heap_stack_gap(vma, addr, len))
17885 /* remember the address as a hint for next time */
17886 return mm->free_area_cache = addr;
17887
17888@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17889 mm->cached_hole_size = vma->vm_start - addr;
17890
17891 /* try just below the current vma->vm_start */
17892- addr = vma->vm_start-len;
17893- } while (len < vma->vm_start);
17894+ addr = skip_heap_stack_gap(vma, len);
17895+ } while (!IS_ERR_VALUE(addr));
17896
17897 bottomup:
17898 /*
17899@@ -198,13 +206,21 @@ bottomup:
17900 * can happen with large stack limits and large mmap()
17901 * allocations.
17902 */
17903+ mm->mmap_base = TASK_UNMAPPED_BASE;
17904+
17905+#ifdef CONFIG_PAX_RANDMMAP
17906+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17907+ mm->mmap_base += mm->delta_mmap;
17908+#endif
17909+
17910+ mm->free_area_cache = mm->mmap_base;
17911 mm->cached_hole_size = ~0UL;
17912- mm->free_area_cache = TASK_UNMAPPED_BASE;
17913 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17914 /*
17915 * Restore the topdown base:
17916 */
17917- mm->free_area_cache = mm->mmap_base;
17918+ mm->mmap_base = base;
17919+ mm->free_area_cache = base;
17920 mm->cached_hole_size = ~0UL;
17921
17922 return addr;
17923diff -urNp linux-2.6.32.48/arch/x86/kernel/tboot.c linux-2.6.32.48/arch/x86/kernel/tboot.c
17924--- linux-2.6.32.48/arch/x86/kernel/tboot.c 2011-11-08 19:02:43.000000000 -0500
17925+++ linux-2.6.32.48/arch/x86/kernel/tboot.c 2011-11-15 19:59:43.000000000 -0500
17926@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17927
17928 void tboot_shutdown(u32 shutdown_type)
17929 {
17930- void (*shutdown)(void);
17931+ void (* __noreturn shutdown)(void);
17932
17933 if (!tboot_enabled())
17934 return;
17935@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17936
17937 switch_to_tboot_pt();
17938
17939- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17940+ shutdown = (void *)tboot->shutdown_entry;
17941 shutdown();
17942
17943 /* should not reach here */
17944@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17945 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17946 }
17947
17948-static atomic_t ap_wfs_count;
17949+static atomic_unchecked_t ap_wfs_count;
17950
17951 static int tboot_wait_for_aps(int num_aps)
17952 {
17953@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17954 {
17955 switch (action) {
17956 case CPU_DYING:
17957- atomic_inc(&ap_wfs_count);
17958+ atomic_inc_unchecked(&ap_wfs_count);
17959 if (num_online_cpus() == 1)
17960- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17961+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17962 return NOTIFY_BAD;
17963 break;
17964 }
17965@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17966
17967 tboot_create_trampoline();
17968
17969- atomic_set(&ap_wfs_count, 0);
17970+ atomic_set_unchecked(&ap_wfs_count, 0);
17971 register_hotcpu_notifier(&tboot_cpu_notifier);
17972 return 0;
17973 }
17974diff -urNp linux-2.6.32.48/arch/x86/kernel/time.c linux-2.6.32.48/arch/x86/kernel/time.c
17975--- linux-2.6.32.48/arch/x86/kernel/time.c 2011-11-08 19:02:43.000000000 -0500
17976+++ linux-2.6.32.48/arch/x86/kernel/time.c 2011-11-15 19:59:43.000000000 -0500
17977@@ -26,17 +26,13 @@
17978 int timer_ack;
17979 #endif
17980
17981-#ifdef CONFIG_X86_64
17982-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17983-#endif
17984-
17985 unsigned long profile_pc(struct pt_regs *regs)
17986 {
17987 unsigned long pc = instruction_pointer(regs);
17988
17989- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17990+ if (!user_mode(regs) && in_lock_functions(pc)) {
17991 #ifdef CONFIG_FRAME_POINTER
17992- return *(unsigned long *)(regs->bp + sizeof(long));
17993+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17994 #else
17995 unsigned long *sp =
17996 (unsigned long *)kernel_stack_pointer(regs);
17997@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17998 * or above a saved flags. Eflags has bits 22-31 zero,
17999 * kernel addresses don't.
18000 */
18001+
18002+#ifdef CONFIG_PAX_KERNEXEC
18003+ return ktla_ktva(sp[0]);
18004+#else
18005 if (sp[0] >> 22)
18006 return sp[0];
18007 if (sp[1] >> 22)
18008 return sp[1];
18009 #endif
18010+
18011+#endif
18012 }
18013 return pc;
18014 }
18015diff -urNp linux-2.6.32.48/arch/x86/kernel/tls.c linux-2.6.32.48/arch/x86/kernel/tls.c
18016--- linux-2.6.32.48/arch/x86/kernel/tls.c 2011-11-08 19:02:43.000000000 -0500
18017+++ linux-2.6.32.48/arch/x86/kernel/tls.c 2011-11-15 19:59:43.000000000 -0500
18018@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
18019 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
18020 return -EINVAL;
18021
18022+#ifdef CONFIG_PAX_SEGMEXEC
18023+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
18024+ return -EINVAL;
18025+#endif
18026+
18027 set_tls_desc(p, idx, &info, 1);
18028
18029 return 0;
18030diff -urNp linux-2.6.32.48/arch/x86/kernel/trampoline_32.S linux-2.6.32.48/arch/x86/kernel/trampoline_32.S
18031--- linux-2.6.32.48/arch/x86/kernel/trampoline_32.S 2011-11-08 19:02:43.000000000 -0500
18032+++ linux-2.6.32.48/arch/x86/kernel/trampoline_32.S 2011-11-15 19:59:43.000000000 -0500
18033@@ -32,6 +32,12 @@
18034 #include <asm/segment.h>
18035 #include <asm/page_types.h>
18036
18037+#ifdef CONFIG_PAX_KERNEXEC
18038+#define ta(X) (X)
18039+#else
18040+#define ta(X) ((X) - __PAGE_OFFSET)
18041+#endif
18042+
18043 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
18044 __CPUINITRODATA
18045 .code16
18046@@ -60,7 +66,7 @@ r_base = .
18047 inc %ax # protected mode (PE) bit
18048 lmsw %ax # into protected mode
18049 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
18050- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
18051+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
18052
18053 # These need to be in the same 64K segment as the above;
18054 # hence we don't use the boot_gdt_descr defined in head.S
18055diff -urNp linux-2.6.32.48/arch/x86/kernel/trampoline_64.S linux-2.6.32.48/arch/x86/kernel/trampoline_64.S
18056--- linux-2.6.32.48/arch/x86/kernel/trampoline_64.S 2011-11-08 19:02:43.000000000 -0500
18057+++ linux-2.6.32.48/arch/x86/kernel/trampoline_64.S 2011-11-15 19:59:43.000000000 -0500
18058@@ -91,7 +91,7 @@ startup_32:
18059 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
18060 movl %eax, %ds
18061
18062- movl $X86_CR4_PAE, %eax
18063+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18064 movl %eax, %cr4 # Enable PAE mode
18065
18066 # Setup trampoline 4 level pagetables
18067@@ -127,7 +127,7 @@ startup_64:
18068 no_longmode:
18069 hlt
18070 jmp no_longmode
18071-#include "verify_cpu_64.S"
18072+#include "verify_cpu.S"
18073
18074 # Careful these need to be in the same 64K segment as the above;
18075 tidt:
18076@@ -138,7 +138,7 @@ tidt:
18077 # so the kernel can live anywhere
18078 .balign 4
18079 tgdt:
18080- .short tgdt_end - tgdt # gdt limit
18081+ .short tgdt_end - tgdt - 1 # gdt limit
18082 .long tgdt - r_base
18083 .short 0
18084 .quad 0x00cf9b000000ffff # __KERNEL32_CS
18085diff -urNp linux-2.6.32.48/arch/x86/kernel/traps.c linux-2.6.32.48/arch/x86/kernel/traps.c
18086--- linux-2.6.32.48/arch/x86/kernel/traps.c 2011-11-08 19:02:43.000000000 -0500
18087+++ linux-2.6.32.48/arch/x86/kernel/traps.c 2011-11-15 19:59:43.000000000 -0500
18088@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
18089
18090 /* Do we ignore FPU interrupts ? */
18091 char ignore_fpu_irq;
18092-
18093-/*
18094- * The IDT has to be page-aligned to simplify the Pentium
18095- * F0 0F bug workaround.
18096- */
18097-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
18098 #endif
18099
18100 DECLARE_BITMAP(used_vectors, NR_VECTORS);
18101@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
18102 static inline void
18103 die_if_kernel(const char *str, struct pt_regs *regs, long err)
18104 {
18105- if (!user_mode_vm(regs))
18106+ if (!user_mode(regs))
18107 die(str, regs, err);
18108 }
18109 #endif
18110
18111 static void __kprobes
18112-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
18113+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
18114 long error_code, siginfo_t *info)
18115 {
18116 struct task_struct *tsk = current;
18117
18118 #ifdef CONFIG_X86_32
18119- if (regs->flags & X86_VM_MASK) {
18120+ if (v8086_mode(regs)) {
18121 /*
18122 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
18123 * On nmi (interrupt 2), do_trap should not be called.
18124@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
18125 }
18126 #endif
18127
18128- if (!user_mode(regs))
18129+ if (!user_mode_novm(regs))
18130 goto kernel_trap;
18131
18132 #ifdef CONFIG_X86_32
18133@@ -158,7 +152,7 @@ trap_signal:
18134 printk_ratelimit()) {
18135 printk(KERN_INFO
18136 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
18137- tsk->comm, tsk->pid, str,
18138+ tsk->comm, task_pid_nr(tsk), str,
18139 regs->ip, regs->sp, error_code);
18140 print_vma_addr(" in ", regs->ip);
18141 printk("\n");
18142@@ -175,8 +169,20 @@ kernel_trap:
18143 if (!fixup_exception(regs)) {
18144 tsk->thread.error_code = error_code;
18145 tsk->thread.trap_no = trapnr;
18146+
18147+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18148+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
18149+ str = "PAX: suspicious stack segment fault";
18150+#endif
18151+
18152 die(str, regs, error_code);
18153 }
18154+
18155+#ifdef CONFIG_PAX_REFCOUNT
18156+ if (trapnr == 4)
18157+ pax_report_refcount_overflow(regs);
18158+#endif
18159+
18160 return;
18161
18162 #ifdef CONFIG_X86_32
18163@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
18164 conditional_sti(regs);
18165
18166 #ifdef CONFIG_X86_32
18167- if (regs->flags & X86_VM_MASK)
18168+ if (v8086_mode(regs))
18169 goto gp_in_vm86;
18170 #endif
18171
18172 tsk = current;
18173- if (!user_mode(regs))
18174+ if (!user_mode_novm(regs))
18175 goto gp_in_kernel;
18176
18177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18178+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
18179+ struct mm_struct *mm = tsk->mm;
18180+ unsigned long limit;
18181+
18182+ down_write(&mm->mmap_sem);
18183+ limit = mm->context.user_cs_limit;
18184+ if (limit < TASK_SIZE) {
18185+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
18186+ up_write(&mm->mmap_sem);
18187+ return;
18188+ }
18189+ up_write(&mm->mmap_sem);
18190+ }
18191+#endif
18192+
18193 tsk->thread.error_code = error_code;
18194 tsk->thread.trap_no = 13;
18195
18196@@ -305,6 +327,13 @@ gp_in_kernel:
18197 if (notify_die(DIE_GPF, "general protection fault", regs,
18198 error_code, 13, SIGSEGV) == NOTIFY_STOP)
18199 return;
18200+
18201+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18202+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
18203+ die("PAX: suspicious general protection fault", regs, error_code);
18204+ else
18205+#endif
18206+
18207 die("general protection fault", regs, error_code);
18208 }
18209
18210@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
18211 dotraplinkage notrace __kprobes void
18212 do_nmi(struct pt_regs *regs, long error_code)
18213 {
18214+
18215+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18216+ if (!user_mode(regs)) {
18217+ unsigned long cs = regs->cs & 0xFFFF;
18218+ unsigned long ip = ktva_ktla(regs->ip);
18219+
18220+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18221+ regs->ip = ip;
18222+ }
18223+#endif
18224+
18225 nmi_enter();
18226
18227 inc_irq_stat(__nmi_count);
18228@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
18229 }
18230
18231 #ifdef CONFIG_X86_32
18232- if (regs->flags & X86_VM_MASK)
18233+ if (v8086_mode(regs))
18234 goto debug_vm86;
18235 #endif
18236
18237@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
18238 * kernel space (but re-enable TF when returning to user mode).
18239 */
18240 if (condition & DR_STEP) {
18241- if (!user_mode(regs))
18242+ if (!user_mode_novm(regs))
18243 goto clear_TF_reenable;
18244 }
18245
18246@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
18247 * Handle strange cache flush from user space exception
18248 * in all other cases. This is undocumented behaviour.
18249 */
18250- if (regs->flags & X86_VM_MASK) {
18251+ if (v8086_mode(regs)) {
18252 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
18253 return;
18254 }
18255@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
18256 void __math_state_restore(void)
18257 {
18258 struct thread_info *thread = current_thread_info();
18259- struct task_struct *tsk = thread->task;
18260+ struct task_struct *tsk = current;
18261
18262 /*
18263 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
18264@@ -825,8 +865,7 @@ void __math_state_restore(void)
18265 */
18266 asmlinkage void math_state_restore(void)
18267 {
18268- struct thread_info *thread = current_thread_info();
18269- struct task_struct *tsk = thread->task;
18270+ struct task_struct *tsk = current;
18271
18272 if (!tsk_used_math(tsk)) {
18273 local_irq_enable();
18274diff -urNp linux-2.6.32.48/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.48/arch/x86/kernel/verify_cpu_64.S
18275--- linux-2.6.32.48/arch/x86/kernel/verify_cpu_64.S 2011-11-08 19:02:43.000000000 -0500
18276+++ linux-2.6.32.48/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
18277@@ -1,105 +0,0 @@
18278-/*
18279- *
18280- * verify_cpu.S - Code for cpu long mode and SSE verification. This
18281- * code has been borrowed from boot/setup.S and was introduced by
18282- * Andi Kleen.
18283- *
18284- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
18285- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
18286- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
18287- *
18288- * This source code is licensed under the GNU General Public License,
18289- * Version 2. See the file COPYING for more details.
18290- *
18291- * This is a common code for verification whether CPU supports
18292- * long mode and SSE or not. It is not called directly instead this
18293- * file is included at various places and compiled in that context.
18294- * Following are the current usage.
18295- *
18296- * This file is included by both 16bit and 32bit code.
18297- *
18298- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
18299- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
18300- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
18301- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
18302- *
18303- * verify_cpu, returns the status of cpu check in register %eax.
18304- * 0: Success 1: Failure
18305- *
18306- * The caller needs to check for the error code and take the action
18307- * appropriately. Either display a message or halt.
18308- */
18309-
18310-#include <asm/cpufeature.h>
18311-
18312-verify_cpu:
18313- pushfl # Save caller passed flags
18314- pushl $0 # Kill any dangerous flags
18315- popfl
18316-
18317- pushfl # standard way to check for cpuid
18318- popl %eax
18319- movl %eax,%ebx
18320- xorl $0x200000,%eax
18321- pushl %eax
18322- popfl
18323- pushfl
18324- popl %eax
18325- cmpl %eax,%ebx
18326- jz verify_cpu_no_longmode # cpu has no cpuid
18327-
18328- movl $0x0,%eax # See if cpuid 1 is implemented
18329- cpuid
18330- cmpl $0x1,%eax
18331- jb verify_cpu_no_longmode # no cpuid 1
18332-
18333- xor %di,%di
18334- cmpl $0x68747541,%ebx # AuthenticAMD
18335- jnz verify_cpu_noamd
18336- cmpl $0x69746e65,%edx
18337- jnz verify_cpu_noamd
18338- cmpl $0x444d4163,%ecx
18339- jnz verify_cpu_noamd
18340- mov $1,%di # cpu is from AMD
18341-
18342-verify_cpu_noamd:
18343- movl $0x1,%eax # Does the cpu have what it takes
18344- cpuid
18345- andl $REQUIRED_MASK0,%edx
18346- xorl $REQUIRED_MASK0,%edx
18347- jnz verify_cpu_no_longmode
18348-
18349- movl $0x80000000,%eax # See if extended cpuid is implemented
18350- cpuid
18351- cmpl $0x80000001,%eax
18352- jb verify_cpu_no_longmode # no extended cpuid
18353-
18354- movl $0x80000001,%eax # Does the cpu have what it takes
18355- cpuid
18356- andl $REQUIRED_MASK1,%edx
18357- xorl $REQUIRED_MASK1,%edx
18358- jnz verify_cpu_no_longmode
18359-
18360-verify_cpu_sse_test:
18361- movl $1,%eax
18362- cpuid
18363- andl $SSE_MASK,%edx
18364- cmpl $SSE_MASK,%edx
18365- je verify_cpu_sse_ok
18366- test %di,%di
18367- jz verify_cpu_no_longmode # only try to force SSE on AMD
18368- movl $0xc0010015,%ecx # HWCR
18369- rdmsr
18370- btr $15,%eax # enable SSE
18371- wrmsr
18372- xor %di,%di # don't loop
18373- jmp verify_cpu_sse_test # try again
18374-
18375-verify_cpu_no_longmode:
18376- popfl # Restore caller passed flags
18377- movl $1,%eax
18378- ret
18379-verify_cpu_sse_ok:
18380- popfl # Restore caller passed flags
18381- xorl %eax, %eax
18382- ret
18383diff -urNp linux-2.6.32.48/arch/x86/kernel/verify_cpu.S linux-2.6.32.48/arch/x86/kernel/verify_cpu.S
18384--- linux-2.6.32.48/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
18385+++ linux-2.6.32.48/arch/x86/kernel/verify_cpu.S 2011-11-15 19:59:43.000000000 -0500
18386@@ -0,0 +1,140 @@
18387+/*
18388+ *
18389+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
18390+ * code has been borrowed from boot/setup.S and was introduced by
18391+ * Andi Kleen.
18392+ *
18393+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
18394+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
18395+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
18396+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
18397+ *
18398+ * This source code is licensed under the GNU General Public License,
18399+ * Version 2. See the file COPYING for more details.
18400+ *
18401+ * This is a common code for verification whether CPU supports
18402+ * long mode and SSE or not. It is not called directly instead this
18403+ * file is included at various places and compiled in that context.
18404+ * This file is expected to run in 32bit code. Currently:
18405+ *
18406+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
18407+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
18408+ * arch/x86/kernel/head_32.S: processor startup
18409+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
18410+ *
18411+ * verify_cpu, returns the status of longmode and SSE in register %eax.
18412+ * 0: Success 1: Failure
18413+ *
18414+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
18415+ *
18416+ * The caller needs to check for the error code and take the action
18417+ * appropriately. Either display a message or halt.
18418+ */
18419+
18420+#include <asm/cpufeature.h>
18421+#include <asm/msr-index.h>
18422+
18423+verify_cpu:
18424+ pushfl # Save caller passed flags
18425+ pushl $0 # Kill any dangerous flags
18426+ popfl
18427+
18428+ pushfl # standard way to check for cpuid
18429+ popl %eax
18430+ movl %eax,%ebx
18431+ xorl $0x200000,%eax
18432+ pushl %eax
18433+ popfl
18434+ pushfl
18435+ popl %eax
18436+ cmpl %eax,%ebx
18437+ jz verify_cpu_no_longmode # cpu has no cpuid
18438+
18439+ movl $0x0,%eax # See if cpuid 1 is implemented
18440+ cpuid
18441+ cmpl $0x1,%eax
18442+ jb verify_cpu_no_longmode # no cpuid 1
18443+
18444+ xor %di,%di
18445+ cmpl $0x68747541,%ebx # AuthenticAMD
18446+ jnz verify_cpu_noamd
18447+ cmpl $0x69746e65,%edx
18448+ jnz verify_cpu_noamd
18449+ cmpl $0x444d4163,%ecx
18450+ jnz verify_cpu_noamd
18451+ mov $1,%di # cpu is from AMD
18452+ jmp verify_cpu_check
18453+
18454+verify_cpu_noamd:
18455+ cmpl $0x756e6547,%ebx # GenuineIntel?
18456+ jnz verify_cpu_check
18457+ cmpl $0x49656e69,%edx
18458+ jnz verify_cpu_check
18459+ cmpl $0x6c65746e,%ecx
18460+ jnz verify_cpu_check
18461+
18462+ # only call IA32_MISC_ENABLE when:
18463+ # family > 6 || (family == 6 && model >= 0xd)
18464+ movl $0x1, %eax # check CPU family and model
18465+ cpuid
18466+ movl %eax, %ecx
18467+
18468+ andl $0x0ff00f00, %eax # mask family and extended family
18469+ shrl $8, %eax
18470+ cmpl $6, %eax
18471+ ja verify_cpu_clear_xd # family > 6, ok
18472+ jb verify_cpu_check # family < 6, skip
18473+
18474+ andl $0x000f00f0, %ecx # mask model and extended model
18475+ shrl $4, %ecx
18476+ cmpl $0xd, %ecx
18477+ jb verify_cpu_check # family == 6, model < 0xd, skip
18478+
18479+verify_cpu_clear_xd:
18480+ movl $MSR_IA32_MISC_ENABLE, %ecx
18481+ rdmsr
18482+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
18483+ jnc verify_cpu_check # only write MSR if bit was changed
18484+ wrmsr
18485+
18486+verify_cpu_check:
18487+ movl $0x1,%eax # Does the cpu have what it takes
18488+ cpuid
18489+ andl $REQUIRED_MASK0,%edx
18490+ xorl $REQUIRED_MASK0,%edx
18491+ jnz verify_cpu_no_longmode
18492+
18493+ movl $0x80000000,%eax # See if extended cpuid is implemented
18494+ cpuid
18495+ cmpl $0x80000001,%eax
18496+ jb verify_cpu_no_longmode # no extended cpuid
18497+
18498+ movl $0x80000001,%eax # Does the cpu have what it takes
18499+ cpuid
18500+ andl $REQUIRED_MASK1,%edx
18501+ xorl $REQUIRED_MASK1,%edx
18502+ jnz verify_cpu_no_longmode
18503+
18504+verify_cpu_sse_test:
18505+ movl $1,%eax
18506+ cpuid
18507+ andl $SSE_MASK,%edx
18508+ cmpl $SSE_MASK,%edx
18509+ je verify_cpu_sse_ok
18510+ test %di,%di
18511+ jz verify_cpu_no_longmode # only try to force SSE on AMD
18512+ movl $MSR_K7_HWCR,%ecx
18513+ rdmsr
18514+ btr $15,%eax # enable SSE
18515+ wrmsr
18516+ xor %di,%di # don't loop
18517+ jmp verify_cpu_sse_test # try again
18518+
18519+verify_cpu_no_longmode:
18520+ popfl # Restore caller passed flags
18521+ movl $1,%eax
18522+ ret
18523+verify_cpu_sse_ok:
18524+ popfl # Restore caller passed flags
18525+ xorl %eax, %eax
18526+ ret
18527diff -urNp linux-2.6.32.48/arch/x86/kernel/vm86_32.c linux-2.6.32.48/arch/x86/kernel/vm86_32.c
18528--- linux-2.6.32.48/arch/x86/kernel/vm86_32.c 2011-11-08 19:02:43.000000000 -0500
18529+++ linux-2.6.32.48/arch/x86/kernel/vm86_32.c 2011-11-15 19:59:43.000000000 -0500
18530@@ -41,6 +41,7 @@
18531 #include <linux/ptrace.h>
18532 #include <linux/audit.h>
18533 #include <linux/stddef.h>
18534+#include <linux/grsecurity.h>
18535
18536 #include <asm/uaccess.h>
18537 #include <asm/io.h>
18538@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18539 do_exit(SIGSEGV);
18540 }
18541
18542- tss = &per_cpu(init_tss, get_cpu());
18543+ tss = init_tss + get_cpu();
18544 current->thread.sp0 = current->thread.saved_sp0;
18545 current->thread.sysenter_cs = __KERNEL_CS;
18546 load_sp0(tss, &current->thread);
18547@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18548 struct task_struct *tsk;
18549 int tmp, ret = -EPERM;
18550
18551+#ifdef CONFIG_GRKERNSEC_VM86
18552+ if (!capable(CAP_SYS_RAWIO)) {
18553+ gr_handle_vm86();
18554+ goto out;
18555+ }
18556+#endif
18557+
18558 tsk = current;
18559 if (tsk->thread.saved_sp0)
18560 goto out;
18561@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18562 int tmp, ret;
18563 struct vm86plus_struct __user *v86;
18564
18565+#ifdef CONFIG_GRKERNSEC_VM86
18566+ if (!capable(CAP_SYS_RAWIO)) {
18567+ gr_handle_vm86();
18568+ ret = -EPERM;
18569+ goto out;
18570+ }
18571+#endif
18572+
18573 tsk = current;
18574 switch (regs->bx) {
18575 case VM86_REQUEST_IRQ:
18576@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18577 tsk->thread.saved_fs = info->regs32->fs;
18578 tsk->thread.saved_gs = get_user_gs(info->regs32);
18579
18580- tss = &per_cpu(init_tss, get_cpu());
18581+ tss = init_tss + get_cpu();
18582 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18583 if (cpu_has_sep)
18584 tsk->thread.sysenter_cs = 0;
18585@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18586 goto cannot_handle;
18587 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18588 goto cannot_handle;
18589- intr_ptr = (unsigned long __user *) (i << 2);
18590+ intr_ptr = (__force unsigned long __user *) (i << 2);
18591 if (get_user(segoffs, intr_ptr))
18592 goto cannot_handle;
18593 if ((segoffs >> 16) == BIOSSEG)
18594diff -urNp linux-2.6.32.48/arch/x86/kernel/vmi_32.c linux-2.6.32.48/arch/x86/kernel/vmi_32.c
18595--- linux-2.6.32.48/arch/x86/kernel/vmi_32.c 2011-11-08 19:02:43.000000000 -0500
18596+++ linux-2.6.32.48/arch/x86/kernel/vmi_32.c 2011-11-15 19:59:43.000000000 -0500
18597@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18598 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18599
18600 #define call_vrom_func(rom,func) \
18601- (((VROMFUNC *)(rom->func))())
18602+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
18603
18604 #define call_vrom_long_func(rom,func,arg) \
18605- (((VROMLONGFUNC *)(rom->func)) (arg))
18606+({\
18607+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18608+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18609+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18610+ __reloc;\
18611+})
18612
18613-static struct vrom_header *vmi_rom;
18614+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18615 static int disable_pge;
18616 static int disable_pse;
18617 static int disable_sep;
18618@@ -76,10 +81,10 @@ static struct {
18619 void (*set_initial_ap_state)(int, int);
18620 void (*halt)(void);
18621 void (*set_lazy_mode)(int mode);
18622-} vmi_ops;
18623+} __no_const vmi_ops __read_only;
18624
18625 /* Cached VMI operations */
18626-struct vmi_timer_ops vmi_timer_ops;
18627+struct vmi_timer_ops vmi_timer_ops __read_only;
18628
18629 /*
18630 * VMI patching routines.
18631@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18632 static inline void patch_offset(void *insnbuf,
18633 unsigned long ip, unsigned long dest)
18634 {
18635- *(unsigned long *)(insnbuf+1) = dest-ip-5;
18636+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
18637 }
18638
18639 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18640@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18641 {
18642 u64 reloc;
18643 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18644+
18645 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18646 switch(rel->type) {
18647 case VMI_RELOCATION_CALL_REL:
18648@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18649
18650 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18651 {
18652- const pte_t pte = { .pte = 0 };
18653+ const pte_t pte = __pte(0ULL);
18654 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18655 }
18656
18657 static void vmi_pmd_clear(pmd_t *pmd)
18658 {
18659- const pte_t pte = { .pte = 0 };
18660+ const pte_t pte = __pte(0ULL);
18661 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18662 }
18663 #endif
18664@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18665 ap.ss = __KERNEL_DS;
18666 ap.esp = (unsigned long) start_esp;
18667
18668- ap.ds = __USER_DS;
18669- ap.es = __USER_DS;
18670+ ap.ds = __KERNEL_DS;
18671+ ap.es = __KERNEL_DS;
18672 ap.fs = __KERNEL_PERCPU;
18673- ap.gs = __KERNEL_STACK_CANARY;
18674+ savesegment(gs, ap.gs);
18675
18676 ap.eflags = 0;
18677
18678@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18679 paravirt_leave_lazy_mmu();
18680 }
18681
18682+#ifdef CONFIG_PAX_KERNEXEC
18683+static unsigned long vmi_pax_open_kernel(void)
18684+{
18685+ return 0;
18686+}
18687+
18688+static unsigned long vmi_pax_close_kernel(void)
18689+{
18690+ return 0;
18691+}
18692+#endif
18693+
18694 static inline int __init check_vmi_rom(struct vrom_header *rom)
18695 {
18696 struct pci_header *pci;
18697@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18698 return 0;
18699 if (rom->vrom_signature != VMI_SIGNATURE)
18700 return 0;
18701+ if (rom->rom_length * 512 > sizeof(*rom)) {
18702+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18703+ return 0;
18704+ }
18705 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18706 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18707 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18708@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18709 struct vrom_header *romstart;
18710 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18711 if (check_vmi_rom(romstart)) {
18712- vmi_rom = romstart;
18713+ vmi_rom = *romstart;
18714 return 1;
18715 }
18716 }
18717@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18718
18719 para_fill(pv_irq_ops.safe_halt, Halt);
18720
18721+#ifdef CONFIG_PAX_KERNEXEC
18722+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18723+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18724+#endif
18725+
18726 /*
18727 * Alternative instruction rewriting doesn't happen soon enough
18728 * to convert VMI_IRET to a call instead of a jump; so we have
18729@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18730
18731 void __init vmi_init(void)
18732 {
18733- if (!vmi_rom)
18734+ if (!vmi_rom.rom_signature)
18735 probe_vmi_rom();
18736 else
18737- check_vmi_rom(vmi_rom);
18738+ check_vmi_rom(&vmi_rom);
18739
18740 /* In case probing for or validating the ROM failed, basil */
18741- if (!vmi_rom)
18742+ if (!vmi_rom.rom_signature)
18743 return;
18744
18745- reserve_top_address(-vmi_rom->virtual_top);
18746+ reserve_top_address(-vmi_rom.virtual_top);
18747
18748 #ifdef CONFIG_X86_IO_APIC
18749 /* This is virtual hardware; timer routing is wired correctly */
18750@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18751 {
18752 unsigned long flags;
18753
18754- if (!vmi_rom)
18755+ if (!vmi_rom.rom_signature)
18756 return;
18757
18758 local_irq_save(flags);
18759diff -urNp linux-2.6.32.48/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.48/arch/x86/kernel/vmlinux.lds.S
18760--- linux-2.6.32.48/arch/x86/kernel/vmlinux.lds.S 2011-11-08 19:02:43.000000000 -0500
18761+++ linux-2.6.32.48/arch/x86/kernel/vmlinux.lds.S 2011-11-15 19:59:43.000000000 -0500
18762@@ -26,6 +26,13 @@
18763 #include <asm/page_types.h>
18764 #include <asm/cache.h>
18765 #include <asm/boot.h>
18766+#include <asm/segment.h>
18767+
18768+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18769+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18770+#else
18771+#define __KERNEL_TEXT_OFFSET 0
18772+#endif
18773
18774 #undef i386 /* in case the preprocessor is a 32bit one */
18775
18776@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18777 #ifdef CONFIG_X86_32
18778 OUTPUT_ARCH(i386)
18779 ENTRY(phys_startup_32)
18780-jiffies = jiffies_64;
18781 #else
18782 OUTPUT_ARCH(i386:x86-64)
18783 ENTRY(phys_startup_64)
18784-jiffies_64 = jiffies;
18785 #endif
18786
18787 PHDRS {
18788 text PT_LOAD FLAGS(5); /* R_E */
18789- data PT_LOAD FLAGS(7); /* RWE */
18790+#ifdef CONFIG_X86_32
18791+ module PT_LOAD FLAGS(5); /* R_E */
18792+#endif
18793+#ifdef CONFIG_XEN
18794+ rodata PT_LOAD FLAGS(5); /* R_E */
18795+#else
18796+ rodata PT_LOAD FLAGS(4); /* R__ */
18797+#endif
18798+ data PT_LOAD FLAGS(6); /* RW_ */
18799 #ifdef CONFIG_X86_64
18800 user PT_LOAD FLAGS(5); /* R_E */
18801+#endif
18802+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18803 #ifdef CONFIG_SMP
18804 percpu PT_LOAD FLAGS(6); /* RW_ */
18805 #endif
18806+ text.init PT_LOAD FLAGS(5); /* R_E */
18807+ text.exit PT_LOAD FLAGS(5); /* R_E */
18808 init PT_LOAD FLAGS(7); /* RWE */
18809-#endif
18810 note PT_NOTE FLAGS(0); /* ___ */
18811 }
18812
18813 SECTIONS
18814 {
18815 #ifdef CONFIG_X86_32
18816- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18817- phys_startup_32 = startup_32 - LOAD_OFFSET;
18818+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18819 #else
18820- . = __START_KERNEL;
18821- phys_startup_64 = startup_64 - LOAD_OFFSET;
18822+ . = __START_KERNEL;
18823 #endif
18824
18825 /* Text and read-only data */
18826- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18827- _text = .;
18828+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18829 /* bootstrapping code */
18830+#ifdef CONFIG_X86_32
18831+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18832+#else
18833+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18834+#endif
18835+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18836+ _text = .;
18837 HEAD_TEXT
18838 #ifdef CONFIG_X86_32
18839 . = ALIGN(PAGE_SIZE);
18840@@ -82,28 +102,71 @@ SECTIONS
18841 IRQENTRY_TEXT
18842 *(.fixup)
18843 *(.gnu.warning)
18844- /* End of text section */
18845- _etext = .;
18846 } :text = 0x9090
18847
18848- NOTES :text :note
18849+ . += __KERNEL_TEXT_OFFSET;
18850+
18851+#ifdef CONFIG_X86_32
18852+ . = ALIGN(PAGE_SIZE);
18853+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18854+ *(.vmi.rom)
18855+ } :module
18856+
18857+ . = ALIGN(PAGE_SIZE);
18858+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18859+
18860+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18861+ MODULES_EXEC_VADDR = .;
18862+ BYTE(0)
18863+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18864+ . = ALIGN(HPAGE_SIZE);
18865+ MODULES_EXEC_END = . - 1;
18866+#endif
18867+
18868+ } :module
18869+#endif
18870
18871- EXCEPTION_TABLE(16) :text = 0x9090
18872+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18873+ /* End of text section */
18874+ _etext = . - __KERNEL_TEXT_OFFSET;
18875+ }
18876+
18877+#ifdef CONFIG_X86_32
18878+ . = ALIGN(PAGE_SIZE);
18879+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18880+ *(.idt)
18881+ . = ALIGN(PAGE_SIZE);
18882+ *(.empty_zero_page)
18883+ *(.swapper_pg_fixmap)
18884+ *(.swapper_pg_pmd)
18885+ *(.swapper_pg_dir)
18886+ *(.trampoline_pg_dir)
18887+ } :rodata
18888+#endif
18889+
18890+ . = ALIGN(PAGE_SIZE);
18891+ NOTES :rodata :note
18892+
18893+ EXCEPTION_TABLE(16) :rodata
18894
18895 RO_DATA(PAGE_SIZE)
18896
18897 /* Data */
18898 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18899+
18900+#ifdef CONFIG_PAX_KERNEXEC
18901+ . = ALIGN(HPAGE_SIZE);
18902+#else
18903+ . = ALIGN(PAGE_SIZE);
18904+#endif
18905+
18906 /* Start of data section */
18907 _sdata = .;
18908
18909 /* init_task */
18910 INIT_TASK_DATA(THREAD_SIZE)
18911
18912-#ifdef CONFIG_X86_32
18913- /* 32 bit has nosave before _edata */
18914 NOSAVE_DATA
18915-#endif
18916
18917 PAGE_ALIGNED_DATA(PAGE_SIZE)
18918
18919@@ -112,6 +175,8 @@ SECTIONS
18920 DATA_DATA
18921 CONSTRUCTORS
18922
18923+ jiffies = jiffies_64;
18924+
18925 /* rarely changed data like cpu maps */
18926 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18927
18928@@ -166,12 +231,6 @@ SECTIONS
18929 }
18930 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18931
18932- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18933- .jiffies : AT(VLOAD(.jiffies)) {
18934- *(.jiffies)
18935- }
18936- jiffies = VVIRT(.jiffies);
18937-
18938 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18939 *(.vsyscall_3)
18940 }
18941@@ -187,12 +246,19 @@ SECTIONS
18942 #endif /* CONFIG_X86_64 */
18943
18944 /* Init code and data - will be freed after init */
18945- . = ALIGN(PAGE_SIZE);
18946 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18947+ BYTE(0)
18948+
18949+#ifdef CONFIG_PAX_KERNEXEC
18950+ . = ALIGN(HPAGE_SIZE);
18951+#else
18952+ . = ALIGN(PAGE_SIZE);
18953+#endif
18954+
18955 __init_begin = .; /* paired with __init_end */
18956- }
18957+ } :init.begin
18958
18959-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18960+#ifdef CONFIG_SMP
18961 /*
18962 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18963 * output PHDR, so the next output section - .init.text - should
18964@@ -201,12 +267,27 @@ SECTIONS
18965 PERCPU_VADDR(0, :percpu)
18966 #endif
18967
18968- INIT_TEXT_SECTION(PAGE_SIZE)
18969-#ifdef CONFIG_X86_64
18970- :init
18971-#endif
18972+ . = ALIGN(PAGE_SIZE);
18973+ init_begin = .;
18974+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18975+ VMLINUX_SYMBOL(_sinittext) = .;
18976+ INIT_TEXT
18977+ VMLINUX_SYMBOL(_einittext) = .;
18978+ . = ALIGN(PAGE_SIZE);
18979+ } :text.init
18980
18981- INIT_DATA_SECTION(16)
18982+ /*
18983+ * .exit.text is discard at runtime, not link time, to deal with
18984+ * references from .altinstructions and .eh_frame
18985+ */
18986+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18987+ EXIT_TEXT
18988+ . = ALIGN(16);
18989+ } :text.exit
18990+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18991+
18992+ . = ALIGN(PAGE_SIZE);
18993+ INIT_DATA_SECTION(16) :init
18994
18995 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18996 __x86_cpu_dev_start = .;
18997@@ -232,19 +313,11 @@ SECTIONS
18998 *(.altinstr_replacement)
18999 }
19000
19001- /*
19002- * .exit.text is discard at runtime, not link time, to deal with
19003- * references from .altinstructions and .eh_frame
19004- */
19005- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19006- EXIT_TEXT
19007- }
19008-
19009 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19010 EXIT_DATA
19011 }
19012
19013-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19014+#ifndef CONFIG_SMP
19015 PERCPU(PAGE_SIZE)
19016 #endif
19017
19018@@ -267,12 +340,6 @@ SECTIONS
19019 . = ALIGN(PAGE_SIZE);
19020 }
19021
19022-#ifdef CONFIG_X86_64
19023- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19024- NOSAVE_DATA
19025- }
19026-#endif
19027-
19028 /* BSS */
19029 . = ALIGN(PAGE_SIZE);
19030 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19031@@ -288,6 +355,7 @@ SECTIONS
19032 __brk_base = .;
19033 . += 64 * 1024; /* 64k alignment slop space */
19034 *(.brk_reservation) /* areas brk users have reserved */
19035+ . = ALIGN(HPAGE_SIZE);
19036 __brk_limit = .;
19037 }
19038
19039@@ -316,13 +384,12 @@ SECTIONS
19040 * for the boot processor.
19041 */
19042 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
19043-INIT_PER_CPU(gdt_page);
19044 INIT_PER_CPU(irq_stack_union);
19045
19046 /*
19047 * Build-time check on the image size:
19048 */
19049-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19050+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19051 "kernel image bigger than KERNEL_IMAGE_SIZE");
19052
19053 #ifdef CONFIG_SMP
19054diff -urNp linux-2.6.32.48/arch/x86/kernel/vsyscall_64.c linux-2.6.32.48/arch/x86/kernel/vsyscall_64.c
19055--- linux-2.6.32.48/arch/x86/kernel/vsyscall_64.c 2011-11-08 19:02:43.000000000 -0500
19056+++ linux-2.6.32.48/arch/x86/kernel/vsyscall_64.c 2011-11-15 19:59:43.000000000 -0500
19057@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
19058
19059 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
19060 /* copy vsyscall data */
19061+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
19062 vsyscall_gtod_data.clock.vread = clock->vread;
19063 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
19064 vsyscall_gtod_data.clock.mask = clock->mask;
19065@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
19066 We do this here because otherwise user space would do it on
19067 its own in a likely inferior way (no access to jiffies).
19068 If you don't like it pass NULL. */
19069- if (tcache && tcache->blob[0] == (j = __jiffies)) {
19070+ if (tcache && tcache->blob[0] == (j = jiffies)) {
19071 p = tcache->blob[1];
19072 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
19073 /* Load per CPU data from RDTSCP */
19074diff -urNp linux-2.6.32.48/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.48/arch/x86/kernel/x8664_ksyms_64.c
19075--- linux-2.6.32.48/arch/x86/kernel/x8664_ksyms_64.c 2011-11-08 19:02:43.000000000 -0500
19076+++ linux-2.6.32.48/arch/x86/kernel/x8664_ksyms_64.c 2011-11-15 19:59:43.000000000 -0500
19077@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
19078
19079 EXPORT_SYMBOL(copy_user_generic);
19080 EXPORT_SYMBOL(__copy_user_nocache);
19081-EXPORT_SYMBOL(copy_from_user);
19082-EXPORT_SYMBOL(copy_to_user);
19083 EXPORT_SYMBOL(__copy_from_user_inatomic);
19084
19085 EXPORT_SYMBOL(copy_page);
19086diff -urNp linux-2.6.32.48/arch/x86/kernel/xsave.c linux-2.6.32.48/arch/x86/kernel/xsave.c
19087--- linux-2.6.32.48/arch/x86/kernel/xsave.c 2011-11-08 19:02:43.000000000 -0500
19088+++ linux-2.6.32.48/arch/x86/kernel/xsave.c 2011-11-15 19:59:43.000000000 -0500
19089@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
19090 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19091 return -1;
19092
19093- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19094+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19095 fx_sw_user->extended_size -
19096 FP_XSTATE_MAGIC2_SIZE));
19097 /*
19098@@ -196,7 +196,7 @@ fx_only:
19099 * the other extended state.
19100 */
19101 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19102- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19103+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19104 }
19105
19106 /*
19107@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
19108 if (task_thread_info(tsk)->status & TS_XSAVE)
19109 err = restore_user_xstate(buf);
19110 else
19111- err = fxrstor_checking((__force struct i387_fxsave_struct *)
19112+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
19113 buf);
19114 if (unlikely(err)) {
19115 /*
19116diff -urNp linux-2.6.32.48/arch/x86/kvm/emulate.c linux-2.6.32.48/arch/x86/kvm/emulate.c
19117--- linux-2.6.32.48/arch/x86/kvm/emulate.c 2011-11-08 19:02:43.000000000 -0500
19118+++ linux-2.6.32.48/arch/x86/kvm/emulate.c 2011-11-15 19:59:43.000000000 -0500
19119@@ -81,8 +81,8 @@
19120 #define Src2CL (1<<29)
19121 #define Src2ImmByte (2<<29)
19122 #define Src2One (3<<29)
19123-#define Src2Imm16 (4<<29)
19124-#define Src2Mask (7<<29)
19125+#define Src2Imm16 (4U<<29)
19126+#define Src2Mask (7U<<29)
19127
19128 enum {
19129 Group1_80, Group1_81, Group1_82, Group1_83,
19130@@ -411,6 +411,7 @@ static u32 group2_table[] = {
19131
19132 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
19133 do { \
19134+ unsigned long _tmp; \
19135 __asm__ __volatile__ ( \
19136 _PRE_EFLAGS("0", "4", "2") \
19137 _op _suffix " %"_x"3,%1; " \
19138@@ -424,8 +425,6 @@ static u32 group2_table[] = {
19139 /* Raw emulation: instruction has two explicit operands. */
19140 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
19141 do { \
19142- unsigned long _tmp; \
19143- \
19144 switch ((_dst).bytes) { \
19145 case 2: \
19146 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
19147@@ -441,7 +440,6 @@ static u32 group2_table[] = {
19148
19149 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19150 do { \
19151- unsigned long _tmp; \
19152 switch ((_dst).bytes) { \
19153 case 1: \
19154 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
19155diff -urNp linux-2.6.32.48/arch/x86/kvm/lapic.c linux-2.6.32.48/arch/x86/kvm/lapic.c
19156--- linux-2.6.32.48/arch/x86/kvm/lapic.c 2011-11-08 19:02:43.000000000 -0500
19157+++ linux-2.6.32.48/arch/x86/kvm/lapic.c 2011-11-15 19:59:43.000000000 -0500
19158@@ -52,7 +52,7 @@
19159 #define APIC_BUS_CYCLE_NS 1
19160
19161 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19162-#define apic_debug(fmt, arg...)
19163+#define apic_debug(fmt, arg...) do {} while (0)
19164
19165 #define APIC_LVT_NUM 6
19166 /* 14 is the version for Xeon and Pentium 8.4.8*/
19167diff -urNp linux-2.6.32.48/arch/x86/kvm/paging_tmpl.h linux-2.6.32.48/arch/x86/kvm/paging_tmpl.h
19168--- linux-2.6.32.48/arch/x86/kvm/paging_tmpl.h 2011-11-08 19:02:43.000000000 -0500
19169+++ linux-2.6.32.48/arch/x86/kvm/paging_tmpl.h 2011-11-15 19:59:43.000000000 -0500
19170@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
19171 int level = PT_PAGE_TABLE_LEVEL;
19172 unsigned long mmu_seq;
19173
19174+ pax_track_stack();
19175+
19176 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
19177 kvm_mmu_audit(vcpu, "pre page fault");
19178
19179diff -urNp linux-2.6.32.48/arch/x86/kvm/svm.c linux-2.6.32.48/arch/x86/kvm/svm.c
19180--- linux-2.6.32.48/arch/x86/kvm/svm.c 2011-11-08 19:02:43.000000000 -0500
19181+++ linux-2.6.32.48/arch/x86/kvm/svm.c 2011-11-15 19:59:43.000000000 -0500
19182@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *
19183 int cpu = raw_smp_processor_id();
19184
19185 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
19186+
19187+ pax_open_kernel();
19188 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
19189+ pax_close_kernel();
19190+
19191 load_TR_desc();
19192 }
19193
19194@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
19195 return true;
19196 }
19197
19198-static struct kvm_x86_ops svm_x86_ops = {
19199+static const struct kvm_x86_ops svm_x86_ops = {
19200 .cpu_has_kvm_support = has_svm,
19201 .disabled_by_bios = is_disabled,
19202 .hardware_setup = svm_hardware_setup,
19203diff -urNp linux-2.6.32.48/arch/x86/kvm/vmx.c linux-2.6.32.48/arch/x86/kvm/vmx.c
19204--- linux-2.6.32.48/arch/x86/kvm/vmx.c 2011-11-08 19:02:43.000000000 -0500
19205+++ linux-2.6.32.48/arch/x86/kvm/vmx.c 2011-11-15 19:59:43.000000000 -0500
19206@@ -570,7 +570,11 @@ static void reload_tss(void)
19207
19208 kvm_get_gdt(&gdt);
19209 descs = (void *)gdt.base;
19210+
19211+ pax_open_kernel();
19212 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19213+ pax_close_kernel();
19214+
19215 load_TR_desc();
19216 }
19217
19218@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
19219 if (!cpu_has_vmx_flexpriority())
19220 flexpriority_enabled = 0;
19221
19222- if (!cpu_has_vmx_tpr_shadow())
19223- kvm_x86_ops->update_cr8_intercept = NULL;
19224+ if (!cpu_has_vmx_tpr_shadow()) {
19225+ pax_open_kernel();
19226+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19227+ pax_close_kernel();
19228+ }
19229
19230 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19231 kvm_disable_largepages();
19232@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
19233 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
19234
19235 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
19236- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
19237+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
19238 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
19239 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
19240 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
19241@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
19242 "jmp .Lkvm_vmx_return \n\t"
19243 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19244 ".Lkvm_vmx_return: "
19245+
19246+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19247+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19248+ ".Lkvm_vmx_return2: "
19249+#endif
19250+
19251 /* Save guest registers, load host registers, keep flags */
19252 "xchg %0, (%%"R"sp) \n\t"
19253 "mov %%"R"ax, %c[rax](%0) \n\t"
19254@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
19255 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
19256 #endif
19257 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
19258+
19259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19260+ ,[cs]"i"(__KERNEL_CS)
19261+#endif
19262+
19263 : "cc", "memory"
19264- , R"bx", R"di", R"si"
19265+ , R"ax", R"bx", R"di", R"si"
19266 #ifdef CONFIG_X86_64
19267 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
19268 #endif
19269@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
19270 if (vmx->rmode.irq.pending)
19271 fixup_rmode_irq(vmx);
19272
19273- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19274+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19275+
19276+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19277+ loadsegment(fs, __KERNEL_PERCPU);
19278+#endif
19279+
19280+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19281+ __set_fs(current_thread_info()->addr_limit);
19282+#endif
19283+
19284 vmx->launched = 1;
19285
19286 vmx_complete_interrupts(vmx);
19287@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
19288 return false;
19289 }
19290
19291-static struct kvm_x86_ops vmx_x86_ops = {
19292+static const struct kvm_x86_ops vmx_x86_ops = {
19293 .cpu_has_kvm_support = cpu_has_kvm_support,
19294 .disabled_by_bios = vmx_disabled_by_bios,
19295 .hardware_setup = hardware_setup,
19296diff -urNp linux-2.6.32.48/arch/x86/kvm/x86.c linux-2.6.32.48/arch/x86/kvm/x86.c
19297--- linux-2.6.32.48/arch/x86/kvm/x86.c 2011-11-08 19:02:43.000000000 -0500
19298+++ linux-2.6.32.48/arch/x86/kvm/x86.c 2011-11-15 19:59:43.000000000 -0500
19299@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
19300 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
19301 struct kvm_cpuid_entry2 __user *entries);
19302
19303-struct kvm_x86_ops *kvm_x86_ops;
19304+const struct kvm_x86_ops *kvm_x86_ops;
19305 EXPORT_SYMBOL_GPL(kvm_x86_ops);
19306
19307 int ignore_msrs = 0;
19308@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
19309 struct kvm_cpuid2 *cpuid,
19310 struct kvm_cpuid_entry2 __user *entries)
19311 {
19312- int r;
19313+ int r, i;
19314
19315 r = -E2BIG;
19316 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19317 goto out;
19318 r = -EFAULT;
19319- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19320- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19321+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19322 goto out;
19323+ for (i = 0; i < cpuid->nent; ++i) {
19324+ struct kvm_cpuid_entry2 cpuid_entry;
19325+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19326+ goto out;
19327+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
19328+ }
19329 vcpu->arch.cpuid_nent = cpuid->nent;
19330 kvm_apic_set_version(vcpu);
19331 return 0;
19332@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
19333 struct kvm_cpuid2 *cpuid,
19334 struct kvm_cpuid_entry2 __user *entries)
19335 {
19336- int r;
19337+ int r, i;
19338
19339 vcpu_load(vcpu);
19340 r = -E2BIG;
19341 if (cpuid->nent < vcpu->arch.cpuid_nent)
19342 goto out;
19343 r = -EFAULT;
19344- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19345- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19346+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19347 goto out;
19348+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19349+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19350+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19351+ goto out;
19352+ }
19353 return 0;
19354
19355 out:
19356@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
19357 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19358 struct kvm_interrupt *irq)
19359 {
19360- if (irq->irq < 0 || irq->irq >= 256)
19361+ if (irq->irq >= 256)
19362 return -EINVAL;
19363 if (irqchip_in_kernel(vcpu->kvm))
19364 return -ENXIO;
19365@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
19366 .notifier_call = kvmclock_cpufreq_notifier
19367 };
19368
19369-int kvm_arch_init(void *opaque)
19370+int kvm_arch_init(const void *opaque)
19371 {
19372 int r, cpu;
19373- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19374+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
19375
19376 if (kvm_x86_ops) {
19377 printk(KERN_ERR "kvm: already loaded the other module\n");
19378diff -urNp linux-2.6.32.48/arch/x86/lguest/boot.c linux-2.6.32.48/arch/x86/lguest/boot.c
19379--- linux-2.6.32.48/arch/x86/lguest/boot.c 2011-11-08 19:02:43.000000000 -0500
19380+++ linux-2.6.32.48/arch/x86/lguest/boot.c 2011-11-15 19:59:43.000000000 -0500
19381@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
19382 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19383 * Launcher to reboot us.
19384 */
19385-static void lguest_restart(char *reason)
19386+static __noreturn void lguest_restart(char *reason)
19387 {
19388 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
19389+ BUG();
19390 }
19391
19392 /*G:050
19393diff -urNp linux-2.6.32.48/arch/x86/lib/atomic64_32.c linux-2.6.32.48/arch/x86/lib/atomic64_32.c
19394--- linux-2.6.32.48/arch/x86/lib/atomic64_32.c 2011-11-08 19:02:43.000000000 -0500
19395+++ linux-2.6.32.48/arch/x86/lib/atomic64_32.c 2011-11-15 19:59:43.000000000 -0500
19396@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
19397 }
19398 EXPORT_SYMBOL(atomic64_cmpxchg);
19399
19400+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
19401+{
19402+ return cmpxchg8b(&ptr->counter, old_val, new_val);
19403+}
19404+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
19405+
19406 /**
19407 * atomic64_xchg - xchg atomic64 variable
19408 * @ptr: pointer to type atomic64_t
19409@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
19410 EXPORT_SYMBOL(atomic64_xchg);
19411
19412 /**
19413+ * atomic64_xchg_unchecked - xchg atomic64 variable
19414+ * @ptr: pointer to type atomic64_unchecked_t
19415+ * @new_val: value to assign
19416+ *
19417+ * Atomically xchgs the value of @ptr to @new_val and returns
19418+ * the old value.
19419+ */
19420+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
19421+{
19422+ /*
19423+ * Try first with a (possibly incorrect) assumption about
19424+ * what we have there. We'll do two loops most likely,
19425+ * but we'll get an ownership MESI transaction straight away
19426+ * instead of a read transaction followed by a
19427+ * flush-for-ownership transaction:
19428+ */
19429+ u64 old_val, real_val = 0;
19430+
19431+ do {
19432+ old_val = real_val;
19433+
19434+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
19435+
19436+ } while (real_val != old_val);
19437+
19438+ return old_val;
19439+}
19440+EXPORT_SYMBOL(atomic64_xchg_unchecked);
19441+
19442+/**
19443 * atomic64_set - set atomic64 variable
19444 * @ptr: pointer to type atomic64_t
19445 * @new_val: value to assign
19446@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
19447 EXPORT_SYMBOL(atomic64_set);
19448
19449 /**
19450-EXPORT_SYMBOL(atomic64_read);
19451+ * atomic64_unchecked_set - set atomic64 variable
19452+ * @ptr: pointer to type atomic64_unchecked_t
19453+ * @new_val: value to assign
19454+ *
19455+ * Atomically sets the value of @ptr to @new_val.
19456+ */
19457+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
19458+{
19459+ atomic64_xchg_unchecked(ptr, new_val);
19460+}
19461+EXPORT_SYMBOL(atomic64_set_unchecked);
19462+
19463+/**
19464 * atomic64_add_return - add and return
19465 * @delta: integer value to add
19466 * @ptr: pointer to type atomic64_t
19467@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
19468 }
19469 EXPORT_SYMBOL(atomic64_add_return);
19470
19471+/**
19472+ * atomic64_add_return_unchecked - add and return
19473+ * @delta: integer value to add
19474+ * @ptr: pointer to type atomic64_unchecked_t
19475+ *
19476+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
19477+ */
19478+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19479+{
19480+ /*
19481+ * Try first with a (possibly incorrect) assumption about
19482+ * what we have there. We'll do two loops most likely,
19483+ * but we'll get an ownership MESI transaction straight away
19484+ * instead of a read transaction followed by a
19485+ * flush-for-ownership transaction:
19486+ */
19487+ u64 old_val, new_val, real_val = 0;
19488+
19489+ do {
19490+ old_val = real_val;
19491+ new_val = old_val + delta;
19492+
19493+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
19494+
19495+ } while (real_val != old_val);
19496+
19497+ return new_val;
19498+}
19499+EXPORT_SYMBOL(atomic64_add_return_unchecked);
19500+
19501 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
19502 {
19503 return atomic64_add_return(-delta, ptr);
19504 }
19505 EXPORT_SYMBOL(atomic64_sub_return);
19506
19507+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19508+{
19509+ return atomic64_add_return_unchecked(-delta, ptr);
19510+}
19511+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19512+
19513 u64 atomic64_inc_return(atomic64_t *ptr)
19514 {
19515 return atomic64_add_return(1, ptr);
19516 }
19517 EXPORT_SYMBOL(atomic64_inc_return);
19518
19519+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19520+{
19521+ return atomic64_add_return_unchecked(1, ptr);
19522+}
19523+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19524+
19525 u64 atomic64_dec_return(atomic64_t *ptr)
19526 {
19527 return atomic64_sub_return(1, ptr);
19528 }
19529 EXPORT_SYMBOL(atomic64_dec_return);
19530
19531+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19532+{
19533+ return atomic64_sub_return_unchecked(1, ptr);
19534+}
19535+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19536+
19537 /**
19538 * atomic64_add - add integer to atomic64 variable
19539 * @delta: integer value to add
19540@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19541 EXPORT_SYMBOL(atomic64_add);
19542
19543 /**
19544+ * atomic64_add_unchecked - add integer to atomic64 variable
19545+ * @delta: integer value to add
19546+ * @ptr: pointer to type atomic64_unchecked_t
19547+ *
19548+ * Atomically adds @delta to @ptr.
19549+ */
19550+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19551+{
19552+ atomic64_add_return_unchecked(delta, ptr);
19553+}
19554+EXPORT_SYMBOL(atomic64_add_unchecked);
19555+
19556+/**
19557 * atomic64_sub - subtract the atomic64 variable
19558 * @delta: integer value to subtract
19559 * @ptr: pointer to type atomic64_t
19560@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19561 EXPORT_SYMBOL(atomic64_sub);
19562
19563 /**
19564+ * atomic64_sub_unchecked - subtract the atomic64 variable
19565+ * @delta: integer value to subtract
19566+ * @ptr: pointer to type atomic64_unchecked_t
19567+ *
19568+ * Atomically subtracts @delta from @ptr.
19569+ */
19570+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19571+{
19572+ atomic64_add_unchecked(-delta, ptr);
19573+}
19574+EXPORT_SYMBOL(atomic64_sub_unchecked);
19575+
19576+/**
19577 * atomic64_sub_and_test - subtract value from variable and test result
19578 * @delta: integer value to subtract
19579 * @ptr: pointer to type atomic64_t
19580@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19581 EXPORT_SYMBOL(atomic64_inc);
19582
19583 /**
19584+ * atomic64_inc_unchecked - increment atomic64 variable
19585+ * @ptr: pointer to type atomic64_unchecked_t
19586+ *
19587+ * Atomically increments @ptr by 1.
19588+ */
19589+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19590+{
19591+ atomic64_add_unchecked(1, ptr);
19592+}
19593+EXPORT_SYMBOL(atomic64_inc_unchecked);
19594+
19595+/**
19596 * atomic64_dec - decrement atomic64 variable
19597 * @ptr: pointer to type atomic64_t
19598 *
19599@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19600 EXPORT_SYMBOL(atomic64_dec);
19601
19602 /**
19603+ * atomic64_dec_unchecked - decrement atomic64 variable
19604+ * @ptr: pointer to type atomic64_unchecked_t
19605+ *
19606+ * Atomically decrements @ptr by 1.
19607+ */
19608+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19609+{
19610+ atomic64_sub_unchecked(1, ptr);
19611+}
19612+EXPORT_SYMBOL(atomic64_dec_unchecked);
19613+
19614+/**
19615 * atomic64_dec_and_test - decrement and test
19616 * @ptr: pointer to type atomic64_t
19617 *
19618diff -urNp linux-2.6.32.48/arch/x86/lib/checksum_32.S linux-2.6.32.48/arch/x86/lib/checksum_32.S
19619--- linux-2.6.32.48/arch/x86/lib/checksum_32.S 2011-11-08 19:02:43.000000000 -0500
19620+++ linux-2.6.32.48/arch/x86/lib/checksum_32.S 2011-11-15 19:59:43.000000000 -0500
19621@@ -28,7 +28,8 @@
19622 #include <linux/linkage.h>
19623 #include <asm/dwarf2.h>
19624 #include <asm/errno.h>
19625-
19626+#include <asm/segment.h>
19627+
19628 /*
19629 * computes a partial checksum, e.g. for TCP/UDP fragments
19630 */
19631@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19632
19633 #define ARGBASE 16
19634 #define FP 12
19635-
19636-ENTRY(csum_partial_copy_generic)
19637+
19638+ENTRY(csum_partial_copy_generic_to_user)
19639 CFI_STARTPROC
19640+
19641+#ifdef CONFIG_PAX_MEMORY_UDEREF
19642+ pushl %gs
19643+ CFI_ADJUST_CFA_OFFSET 4
19644+ popl %es
19645+ CFI_ADJUST_CFA_OFFSET -4
19646+ jmp csum_partial_copy_generic
19647+#endif
19648+
19649+ENTRY(csum_partial_copy_generic_from_user)
19650+
19651+#ifdef CONFIG_PAX_MEMORY_UDEREF
19652+ pushl %gs
19653+ CFI_ADJUST_CFA_OFFSET 4
19654+ popl %ds
19655+ CFI_ADJUST_CFA_OFFSET -4
19656+#endif
19657+
19658+ENTRY(csum_partial_copy_generic)
19659 subl $4,%esp
19660 CFI_ADJUST_CFA_OFFSET 4
19661 pushl %edi
19662@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19663 jmp 4f
19664 SRC(1: movw (%esi), %bx )
19665 addl $2, %esi
19666-DST( movw %bx, (%edi) )
19667+DST( movw %bx, %es:(%edi) )
19668 addl $2, %edi
19669 addw %bx, %ax
19670 adcl $0, %eax
19671@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19672 SRC(1: movl (%esi), %ebx )
19673 SRC( movl 4(%esi), %edx )
19674 adcl %ebx, %eax
19675-DST( movl %ebx, (%edi) )
19676+DST( movl %ebx, %es:(%edi) )
19677 adcl %edx, %eax
19678-DST( movl %edx, 4(%edi) )
19679+DST( movl %edx, %es:4(%edi) )
19680
19681 SRC( movl 8(%esi), %ebx )
19682 SRC( movl 12(%esi), %edx )
19683 adcl %ebx, %eax
19684-DST( movl %ebx, 8(%edi) )
19685+DST( movl %ebx, %es:8(%edi) )
19686 adcl %edx, %eax
19687-DST( movl %edx, 12(%edi) )
19688+DST( movl %edx, %es:12(%edi) )
19689
19690 SRC( movl 16(%esi), %ebx )
19691 SRC( movl 20(%esi), %edx )
19692 adcl %ebx, %eax
19693-DST( movl %ebx, 16(%edi) )
19694+DST( movl %ebx, %es:16(%edi) )
19695 adcl %edx, %eax
19696-DST( movl %edx, 20(%edi) )
19697+DST( movl %edx, %es:20(%edi) )
19698
19699 SRC( movl 24(%esi), %ebx )
19700 SRC( movl 28(%esi), %edx )
19701 adcl %ebx, %eax
19702-DST( movl %ebx, 24(%edi) )
19703+DST( movl %ebx, %es:24(%edi) )
19704 adcl %edx, %eax
19705-DST( movl %edx, 28(%edi) )
19706+DST( movl %edx, %es:28(%edi) )
19707
19708 lea 32(%esi), %esi
19709 lea 32(%edi), %edi
19710@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19711 shrl $2, %edx # This clears CF
19712 SRC(3: movl (%esi), %ebx )
19713 adcl %ebx, %eax
19714-DST( movl %ebx, (%edi) )
19715+DST( movl %ebx, %es:(%edi) )
19716 lea 4(%esi), %esi
19717 lea 4(%edi), %edi
19718 dec %edx
19719@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19720 jb 5f
19721 SRC( movw (%esi), %cx )
19722 leal 2(%esi), %esi
19723-DST( movw %cx, (%edi) )
19724+DST( movw %cx, %es:(%edi) )
19725 leal 2(%edi), %edi
19726 je 6f
19727 shll $16,%ecx
19728 SRC(5: movb (%esi), %cl )
19729-DST( movb %cl, (%edi) )
19730+DST( movb %cl, %es:(%edi) )
19731 6: addl %ecx, %eax
19732 adcl $0, %eax
19733 7:
19734@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19735
19736 6001:
19737 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19738- movl $-EFAULT, (%ebx)
19739+ movl $-EFAULT, %ss:(%ebx)
19740
19741 # zero the complete destination - computing the rest
19742 # is too much work
19743@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19744
19745 6002:
19746 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19747- movl $-EFAULT,(%ebx)
19748+ movl $-EFAULT,%ss:(%ebx)
19749 jmp 5000b
19750
19751 .previous
19752
19753+ pushl %ss
19754+ CFI_ADJUST_CFA_OFFSET 4
19755+ popl %ds
19756+ CFI_ADJUST_CFA_OFFSET -4
19757+ pushl %ss
19758+ CFI_ADJUST_CFA_OFFSET 4
19759+ popl %es
19760+ CFI_ADJUST_CFA_OFFSET -4
19761 popl %ebx
19762 CFI_ADJUST_CFA_OFFSET -4
19763 CFI_RESTORE ebx
19764@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19765 CFI_ADJUST_CFA_OFFSET -4
19766 ret
19767 CFI_ENDPROC
19768-ENDPROC(csum_partial_copy_generic)
19769+ENDPROC(csum_partial_copy_generic_to_user)
19770
19771 #else
19772
19773 /* Version for PentiumII/PPro */
19774
19775 #define ROUND1(x) \
19776+ nop; nop; nop; \
19777 SRC(movl x(%esi), %ebx ) ; \
19778 addl %ebx, %eax ; \
19779- DST(movl %ebx, x(%edi) ) ;
19780+ DST(movl %ebx, %es:x(%edi)) ;
19781
19782 #define ROUND(x) \
19783+ nop; nop; nop; \
19784 SRC(movl x(%esi), %ebx ) ; \
19785 adcl %ebx, %eax ; \
19786- DST(movl %ebx, x(%edi) ) ;
19787+ DST(movl %ebx, %es:x(%edi)) ;
19788
19789 #define ARGBASE 12
19790-
19791-ENTRY(csum_partial_copy_generic)
19792+
19793+ENTRY(csum_partial_copy_generic_to_user)
19794 CFI_STARTPROC
19795+
19796+#ifdef CONFIG_PAX_MEMORY_UDEREF
19797+ pushl %gs
19798+ CFI_ADJUST_CFA_OFFSET 4
19799+ popl %es
19800+ CFI_ADJUST_CFA_OFFSET -4
19801+ jmp csum_partial_copy_generic
19802+#endif
19803+
19804+ENTRY(csum_partial_copy_generic_from_user)
19805+
19806+#ifdef CONFIG_PAX_MEMORY_UDEREF
19807+ pushl %gs
19808+ CFI_ADJUST_CFA_OFFSET 4
19809+ popl %ds
19810+ CFI_ADJUST_CFA_OFFSET -4
19811+#endif
19812+
19813+ENTRY(csum_partial_copy_generic)
19814 pushl %ebx
19815 CFI_ADJUST_CFA_OFFSET 4
19816 CFI_REL_OFFSET ebx, 0
19817@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19818 subl %ebx, %edi
19819 lea -1(%esi),%edx
19820 andl $-32,%edx
19821- lea 3f(%ebx,%ebx), %ebx
19822+ lea 3f(%ebx,%ebx,2), %ebx
19823 testl %esi, %esi
19824 jmp *%ebx
19825 1: addl $64,%esi
19826@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19827 jb 5f
19828 SRC( movw (%esi), %dx )
19829 leal 2(%esi), %esi
19830-DST( movw %dx, (%edi) )
19831+DST( movw %dx, %es:(%edi) )
19832 leal 2(%edi), %edi
19833 je 6f
19834 shll $16,%edx
19835 5:
19836 SRC( movb (%esi), %dl )
19837-DST( movb %dl, (%edi) )
19838+DST( movb %dl, %es:(%edi) )
19839 6: addl %edx, %eax
19840 adcl $0, %eax
19841 7:
19842 .section .fixup, "ax"
19843 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19844- movl $-EFAULT, (%ebx)
19845+ movl $-EFAULT, %ss:(%ebx)
19846 # zero the complete destination (computing the rest is too much work)
19847 movl ARGBASE+8(%esp),%edi # dst
19848 movl ARGBASE+12(%esp),%ecx # len
19849@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19850 rep; stosb
19851 jmp 7b
19852 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19853- movl $-EFAULT, (%ebx)
19854+ movl $-EFAULT, %ss:(%ebx)
19855 jmp 7b
19856 .previous
19857
19858+#ifdef CONFIG_PAX_MEMORY_UDEREF
19859+ pushl %ss
19860+ CFI_ADJUST_CFA_OFFSET 4
19861+ popl %ds
19862+ CFI_ADJUST_CFA_OFFSET -4
19863+ pushl %ss
19864+ CFI_ADJUST_CFA_OFFSET 4
19865+ popl %es
19866+ CFI_ADJUST_CFA_OFFSET -4
19867+#endif
19868+
19869 popl %esi
19870 CFI_ADJUST_CFA_OFFSET -4
19871 CFI_RESTORE esi
19872@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19873 CFI_RESTORE ebx
19874 ret
19875 CFI_ENDPROC
19876-ENDPROC(csum_partial_copy_generic)
19877+ENDPROC(csum_partial_copy_generic_to_user)
19878
19879 #undef ROUND
19880 #undef ROUND1
19881diff -urNp linux-2.6.32.48/arch/x86/lib/clear_page_64.S linux-2.6.32.48/arch/x86/lib/clear_page_64.S
19882--- linux-2.6.32.48/arch/x86/lib/clear_page_64.S 2011-11-08 19:02:43.000000000 -0500
19883+++ linux-2.6.32.48/arch/x86/lib/clear_page_64.S 2011-11-15 19:59:43.000000000 -0500
19884@@ -1,5 +1,6 @@
19885 #include <linux/linkage.h>
19886 #include <asm/dwarf2.h>
19887+#include <asm/alternative-asm.h>
19888
19889 /*
19890 * Zero a page.
19891@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
19892 movl $4096/8,%ecx
19893 xorl %eax,%eax
19894 rep stosq
19895+ pax_force_retaddr
19896 ret
19897 CFI_ENDPROC
19898 ENDPROC(clear_page_c)
19899@@ -33,6 +35,7 @@ ENTRY(clear_page)
19900 leaq 64(%rdi),%rdi
19901 jnz .Lloop
19902 nop
19903+ pax_force_retaddr
19904 ret
19905 CFI_ENDPROC
19906 .Lclear_page_end:
19907@@ -43,7 +46,7 @@ ENDPROC(clear_page)
19908
19909 #include <asm/cpufeature.h>
19910
19911- .section .altinstr_replacement,"ax"
19912+ .section .altinstr_replacement,"a"
19913 1: .byte 0xeb /* jmp <disp8> */
19914 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19915 2:
19916diff -urNp linux-2.6.32.48/arch/x86/lib/copy_page_64.S linux-2.6.32.48/arch/x86/lib/copy_page_64.S
19917--- linux-2.6.32.48/arch/x86/lib/copy_page_64.S 2011-11-08 19:02:43.000000000 -0500
19918+++ linux-2.6.32.48/arch/x86/lib/copy_page_64.S 2011-11-15 19:59:43.000000000 -0500
19919@@ -2,12 +2,14 @@
19920
19921 #include <linux/linkage.h>
19922 #include <asm/dwarf2.h>
19923+#include <asm/alternative-asm.h>
19924
19925 ALIGN
19926 copy_page_c:
19927 CFI_STARTPROC
19928 movl $4096/8,%ecx
19929 rep movsq
19930+ pax_force_retaddr
19931 ret
19932 CFI_ENDPROC
19933 ENDPROC(copy_page_c)
19934@@ -94,6 +96,7 @@ ENTRY(copy_page)
19935 CFI_RESTORE r13
19936 addq $3*8,%rsp
19937 CFI_ADJUST_CFA_OFFSET -3*8
19938+ pax_force_retaddr
19939 ret
19940 .Lcopy_page_end:
19941 CFI_ENDPROC
19942@@ -104,7 +107,7 @@ ENDPROC(copy_page)
19943
19944 #include <asm/cpufeature.h>
19945
19946- .section .altinstr_replacement,"ax"
19947+ .section .altinstr_replacement,"a"
19948 1: .byte 0xeb /* jmp <disp8> */
19949 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19950 2:
19951diff -urNp linux-2.6.32.48/arch/x86/lib/copy_user_64.S linux-2.6.32.48/arch/x86/lib/copy_user_64.S
19952--- linux-2.6.32.48/arch/x86/lib/copy_user_64.S 2011-11-08 19:02:43.000000000 -0500
19953+++ linux-2.6.32.48/arch/x86/lib/copy_user_64.S 2011-11-15 19:59:43.000000000 -0500
19954@@ -15,13 +15,15 @@
19955 #include <asm/asm-offsets.h>
19956 #include <asm/thread_info.h>
19957 #include <asm/cpufeature.h>
19958+#include <asm/pgtable.h>
19959+#include <asm/alternative-asm.h>
19960
19961 .macro ALTERNATIVE_JUMP feature,orig,alt
19962 0:
19963 .byte 0xe9 /* 32bit jump */
19964 .long \orig-1f /* by default jump to orig */
19965 1:
19966- .section .altinstr_replacement,"ax"
19967+ .section .altinstr_replacement,"a"
19968 2: .byte 0xe9 /* near jump with 32bit immediate */
19969 .long \alt-1b /* offset */ /* or alternatively to alt */
19970 .previous
19971@@ -64,55 +66,26 @@
19972 #endif
19973 .endm
19974
19975-/* Standard copy_to_user with segment limit checking */
19976-ENTRY(copy_to_user)
19977- CFI_STARTPROC
19978- GET_THREAD_INFO(%rax)
19979- movq %rdi,%rcx
19980- addq %rdx,%rcx
19981- jc bad_to_user
19982- cmpq TI_addr_limit(%rax),%rcx
19983- ja bad_to_user
19984- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19985- CFI_ENDPROC
19986-ENDPROC(copy_to_user)
19987-
19988-/* Standard copy_from_user with segment limit checking */
19989-ENTRY(copy_from_user)
19990- CFI_STARTPROC
19991- GET_THREAD_INFO(%rax)
19992- movq %rsi,%rcx
19993- addq %rdx,%rcx
19994- jc bad_from_user
19995- cmpq TI_addr_limit(%rax),%rcx
19996- ja bad_from_user
19997- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19998- CFI_ENDPROC
19999-ENDPROC(copy_from_user)
20000-
20001 ENTRY(copy_user_generic)
20002 CFI_STARTPROC
20003 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
20004 CFI_ENDPROC
20005 ENDPROC(copy_user_generic)
20006
20007-ENTRY(__copy_from_user_inatomic)
20008- CFI_STARTPROC
20009- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
20010- CFI_ENDPROC
20011-ENDPROC(__copy_from_user_inatomic)
20012-
20013 .section .fixup,"ax"
20014 /* must zero dest */
20015 ENTRY(bad_from_user)
20016 bad_from_user:
20017 CFI_STARTPROC
20018+ testl %edx,%edx
20019+ js bad_to_user
20020 movl %edx,%ecx
20021 xorl %eax,%eax
20022 rep
20023 stosb
20024 bad_to_user:
20025 movl %edx,%eax
20026+ pax_force_retaddr
20027 ret
20028 CFI_ENDPROC
20029 ENDPROC(bad_from_user)
20030@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20031 decl %ecx
20032 jnz 21b
20033 23: xor %eax,%eax
20034+ pax_force_retaddr
20035 ret
20036
20037 .section .fixup,"ax"
20038@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
20039 3: rep
20040 movsb
20041 4: xorl %eax,%eax
20042+ pax_force_retaddr
20043 ret
20044
20045 .section .fixup,"ax"
20046diff -urNp linux-2.6.32.48/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.48/arch/x86/lib/copy_user_nocache_64.S
20047--- linux-2.6.32.48/arch/x86/lib/copy_user_nocache_64.S 2011-11-08 19:02:43.000000000 -0500
20048+++ linux-2.6.32.48/arch/x86/lib/copy_user_nocache_64.S 2011-11-15 19:59:43.000000000 -0500
20049@@ -8,12 +8,14 @@
20050
20051 #include <linux/linkage.h>
20052 #include <asm/dwarf2.h>
20053+#include <asm/alternative-asm.h>
20054
20055 #define FIX_ALIGNMENT 1
20056
20057 #include <asm/current.h>
20058 #include <asm/asm-offsets.h>
20059 #include <asm/thread_info.h>
20060+#include <asm/pgtable.h>
20061
20062 .macro ALIGN_DESTINATION
20063 #ifdef FIX_ALIGNMENT
20064@@ -50,6 +52,15 @@
20065 */
20066 ENTRY(__copy_user_nocache)
20067 CFI_STARTPROC
20068+
20069+#ifdef CONFIG_PAX_MEMORY_UDEREF
20070+ mov $PAX_USER_SHADOW_BASE,%rcx
20071+ cmp %rcx,%rsi
20072+ jae 1f
20073+ add %rcx,%rsi
20074+1:
20075+#endif
20076+
20077 cmpl $8,%edx
20078 jb 20f /* less then 8 bytes, go to byte copy loop */
20079 ALIGN_DESTINATION
20080@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
20081 jnz 21b
20082 23: xorl %eax,%eax
20083 sfence
20084+ pax_force_retaddr
20085 ret
20086
20087 .section .fixup,"ax"
20088diff -urNp linux-2.6.32.48/arch/x86/lib/csum-copy_64.S linux-2.6.32.48/arch/x86/lib/csum-copy_64.S
20089--- linux-2.6.32.48/arch/x86/lib/csum-copy_64.S 2011-11-08 19:02:43.000000000 -0500
20090+++ linux-2.6.32.48/arch/x86/lib/csum-copy_64.S 2011-11-15 19:59:43.000000000 -0500
20091@@ -8,6 +8,7 @@
20092 #include <linux/linkage.h>
20093 #include <asm/dwarf2.h>
20094 #include <asm/errno.h>
20095+#include <asm/alternative-asm.h>
20096
20097 /*
20098 * Checksum copy with exception handling.
20099@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
20100 CFI_RESTORE rbp
20101 addq $7*8,%rsp
20102 CFI_ADJUST_CFA_OFFSET -7*8
20103+ pax_force_retaddr
20104 ret
20105 CFI_RESTORE_STATE
20106
20107diff -urNp linux-2.6.32.48/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.48/arch/x86/lib/csum-wrappers_64.c
20108--- linux-2.6.32.48/arch/x86/lib/csum-wrappers_64.c 2011-11-08 19:02:43.000000000 -0500
20109+++ linux-2.6.32.48/arch/x86/lib/csum-wrappers_64.c 2011-11-15 19:59:43.000000000 -0500
20110@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
20111 len -= 2;
20112 }
20113 }
20114- isum = csum_partial_copy_generic((__force const void *)src,
20115+
20116+#ifdef CONFIG_PAX_MEMORY_UDEREF
20117+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20118+ src += PAX_USER_SHADOW_BASE;
20119+#endif
20120+
20121+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
20122 dst, len, isum, errp, NULL);
20123 if (unlikely(*errp))
20124 goto out_err;
20125@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
20126 }
20127
20128 *errp = 0;
20129- return csum_partial_copy_generic(src, (void __force *)dst,
20130+
20131+#ifdef CONFIG_PAX_MEMORY_UDEREF
20132+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
20133+ dst += PAX_USER_SHADOW_BASE;
20134+#endif
20135+
20136+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
20137 len, isum, NULL, errp);
20138 }
20139 EXPORT_SYMBOL(csum_partial_copy_to_user);
20140diff -urNp linux-2.6.32.48/arch/x86/lib/getuser.S linux-2.6.32.48/arch/x86/lib/getuser.S
20141--- linux-2.6.32.48/arch/x86/lib/getuser.S 2011-11-08 19:02:43.000000000 -0500
20142+++ linux-2.6.32.48/arch/x86/lib/getuser.S 2011-11-15 19:59:43.000000000 -0500
20143@@ -33,15 +33,38 @@
20144 #include <asm/asm-offsets.h>
20145 #include <asm/thread_info.h>
20146 #include <asm/asm.h>
20147+#include <asm/segment.h>
20148+#include <asm/pgtable.h>
20149+#include <asm/alternative-asm.h>
20150+
20151+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20152+#define __copyuser_seg gs;
20153+#else
20154+#define __copyuser_seg
20155+#endif
20156
20157 .text
20158 ENTRY(__get_user_1)
20159 CFI_STARTPROC
20160+
20161+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20162 GET_THREAD_INFO(%_ASM_DX)
20163 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
20164 jae bad_get_user
20165-1: movzb (%_ASM_AX),%edx
20166+
20167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20168+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20169+ cmp %_ASM_DX,%_ASM_AX
20170+ jae 1234f
20171+ add %_ASM_DX,%_ASM_AX
20172+1234:
20173+#endif
20174+
20175+#endif
20176+
20177+1: __copyuser_seg movzb (%_ASM_AX),%edx
20178 xor %eax,%eax
20179+ pax_force_retaddr
20180 ret
20181 CFI_ENDPROC
20182 ENDPROC(__get_user_1)
20183@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
20184 ENTRY(__get_user_2)
20185 CFI_STARTPROC
20186 add $1,%_ASM_AX
20187+
20188+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20189 jc bad_get_user
20190 GET_THREAD_INFO(%_ASM_DX)
20191 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
20192 jae bad_get_user
20193-2: movzwl -1(%_ASM_AX),%edx
20194+
20195+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20196+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20197+ cmp %_ASM_DX,%_ASM_AX
20198+ jae 1234f
20199+ add %_ASM_DX,%_ASM_AX
20200+1234:
20201+#endif
20202+
20203+#endif
20204+
20205+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
20206 xor %eax,%eax
20207+ pax_force_retaddr
20208 ret
20209 CFI_ENDPROC
20210 ENDPROC(__get_user_2)
20211@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
20212 ENTRY(__get_user_4)
20213 CFI_STARTPROC
20214 add $3,%_ASM_AX
20215+
20216+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20217 jc bad_get_user
20218 GET_THREAD_INFO(%_ASM_DX)
20219 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
20220 jae bad_get_user
20221-3: mov -3(%_ASM_AX),%edx
20222+
20223+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20224+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20225+ cmp %_ASM_DX,%_ASM_AX
20226+ jae 1234f
20227+ add %_ASM_DX,%_ASM_AX
20228+1234:
20229+#endif
20230+
20231+#endif
20232+
20233+3: __copyuser_seg mov -3(%_ASM_AX),%edx
20234 xor %eax,%eax
20235+ pax_force_retaddr
20236 ret
20237 CFI_ENDPROC
20238 ENDPROC(__get_user_4)
20239@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
20240 GET_THREAD_INFO(%_ASM_DX)
20241 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
20242 jae bad_get_user
20243+
20244+#ifdef CONFIG_PAX_MEMORY_UDEREF
20245+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
20246+ cmp %_ASM_DX,%_ASM_AX
20247+ jae 1234f
20248+ add %_ASM_DX,%_ASM_AX
20249+1234:
20250+#endif
20251+
20252 4: movq -7(%_ASM_AX),%_ASM_DX
20253 xor %eax,%eax
20254+ pax_force_retaddr
20255 ret
20256 CFI_ENDPROC
20257 ENDPROC(__get_user_8)
20258@@ -91,6 +152,7 @@ bad_get_user:
20259 CFI_STARTPROC
20260 xor %edx,%edx
20261 mov $(-EFAULT),%_ASM_AX
20262+ pax_force_retaddr
20263 ret
20264 CFI_ENDPROC
20265 END(bad_get_user)
20266diff -urNp linux-2.6.32.48/arch/x86/lib/iomap_copy_64.S linux-2.6.32.48/arch/x86/lib/iomap_copy_64.S
20267--- linux-2.6.32.48/arch/x86/lib/iomap_copy_64.S 2011-11-08 19:02:43.000000000 -0500
20268+++ linux-2.6.32.48/arch/x86/lib/iomap_copy_64.S 2011-11-15 19:59:43.000000000 -0500
20269@@ -17,6 +17,7 @@
20270
20271 #include <linux/linkage.h>
20272 #include <asm/dwarf2.h>
20273+#include <asm/alternative-asm.h>
20274
20275 /*
20276 * override generic version in lib/iomap_copy.c
20277@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20278 CFI_STARTPROC
20279 movl %edx,%ecx
20280 rep movsd
20281+ pax_force_retaddr
20282 ret
20283 CFI_ENDPROC
20284 ENDPROC(__iowrite32_copy)
20285diff -urNp linux-2.6.32.48/arch/x86/lib/memcpy_64.S linux-2.6.32.48/arch/x86/lib/memcpy_64.S
20286--- linux-2.6.32.48/arch/x86/lib/memcpy_64.S 2011-11-08 19:02:43.000000000 -0500
20287+++ linux-2.6.32.48/arch/x86/lib/memcpy_64.S 2011-11-15 19:59:43.000000000 -0500
20288@@ -4,6 +4,7 @@
20289
20290 #include <asm/cpufeature.h>
20291 #include <asm/dwarf2.h>
20292+#include <asm/alternative-asm.h>
20293
20294 /*
20295 * memcpy - Copy a memory block.
20296@@ -34,6 +35,7 @@ memcpy_c:
20297 rep movsq
20298 movl %edx, %ecx
20299 rep movsb
20300+ pax_force_retaddr
20301 ret
20302 CFI_ENDPROC
20303 ENDPROC(memcpy_c)
20304@@ -118,6 +120,7 @@ ENTRY(memcpy)
20305 jnz .Lloop_1
20306
20307 .Lend:
20308+ pax_force_retaddr
20309 ret
20310 CFI_ENDPROC
20311 ENDPROC(memcpy)
20312@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
20313 * It is also a lot simpler. Use this when possible:
20314 */
20315
20316- .section .altinstr_replacement, "ax"
20317+ .section .altinstr_replacement, "a"
20318 1: .byte 0xeb /* jmp <disp8> */
20319 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
20320 2:
20321diff -urNp linux-2.6.32.48/arch/x86/lib/memset_64.S linux-2.6.32.48/arch/x86/lib/memset_64.S
20322--- linux-2.6.32.48/arch/x86/lib/memset_64.S 2011-11-08 19:02:43.000000000 -0500
20323+++ linux-2.6.32.48/arch/x86/lib/memset_64.S 2011-11-15 19:59:43.000000000 -0500
20324@@ -2,6 +2,7 @@
20325
20326 #include <linux/linkage.h>
20327 #include <asm/dwarf2.h>
20328+#include <asm/alternative-asm.h>
20329
20330 /*
20331 * ISO C memset - set a memory block to a byte value.
20332@@ -28,6 +29,7 @@ memset_c:
20333 movl %r8d,%ecx
20334 rep stosb
20335 movq %r9,%rax
20336+ pax_force_retaddr
20337 ret
20338 CFI_ENDPROC
20339 ENDPROC(memset_c)
20340@@ -96,6 +98,7 @@ ENTRY(__memset)
20341
20342 .Lende:
20343 movq %r10,%rax
20344+ pax_force_retaddr
20345 ret
20346
20347 CFI_RESTORE_STATE
20348@@ -118,7 +121,7 @@ ENDPROC(__memset)
20349
20350 #include <asm/cpufeature.h>
20351
20352- .section .altinstr_replacement,"ax"
20353+ .section .altinstr_replacement,"a"
20354 1: .byte 0xeb /* jmp <disp8> */
20355 .byte (memset_c - memset) - (2f - 1b) /* offset */
20356 2:
20357diff -urNp linux-2.6.32.48/arch/x86/lib/mmx_32.c linux-2.6.32.48/arch/x86/lib/mmx_32.c
20358--- linux-2.6.32.48/arch/x86/lib/mmx_32.c 2011-11-08 19:02:43.000000000 -0500
20359+++ linux-2.6.32.48/arch/x86/lib/mmx_32.c 2011-11-15 19:59:43.000000000 -0500
20360@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
20361 {
20362 void *p;
20363 int i;
20364+ unsigned long cr0;
20365
20366 if (unlikely(in_interrupt()))
20367 return __memcpy(to, from, len);
20368@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
20369 kernel_fpu_begin();
20370
20371 __asm__ __volatile__ (
20372- "1: prefetch (%0)\n" /* This set is 28 bytes */
20373- " prefetch 64(%0)\n"
20374- " prefetch 128(%0)\n"
20375- " prefetch 192(%0)\n"
20376- " prefetch 256(%0)\n"
20377+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20378+ " prefetch 64(%1)\n"
20379+ " prefetch 128(%1)\n"
20380+ " prefetch 192(%1)\n"
20381+ " prefetch 256(%1)\n"
20382 "2: \n"
20383 ".section .fixup, \"ax\"\n"
20384- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20385+ "3: \n"
20386+
20387+#ifdef CONFIG_PAX_KERNEXEC
20388+ " movl %%cr0, %0\n"
20389+ " movl %0, %%eax\n"
20390+ " andl $0xFFFEFFFF, %%eax\n"
20391+ " movl %%eax, %%cr0\n"
20392+#endif
20393+
20394+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20395+
20396+#ifdef CONFIG_PAX_KERNEXEC
20397+ " movl %0, %%cr0\n"
20398+#endif
20399+
20400 " jmp 2b\n"
20401 ".previous\n"
20402 _ASM_EXTABLE(1b, 3b)
20403- : : "r" (from));
20404+ : "=&r" (cr0) : "r" (from) : "ax");
20405
20406 for ( ; i > 5; i--) {
20407 __asm__ __volatile__ (
20408- "1: prefetch 320(%0)\n"
20409- "2: movq (%0), %%mm0\n"
20410- " movq 8(%0), %%mm1\n"
20411- " movq 16(%0), %%mm2\n"
20412- " movq 24(%0), %%mm3\n"
20413- " movq %%mm0, (%1)\n"
20414- " movq %%mm1, 8(%1)\n"
20415- " movq %%mm2, 16(%1)\n"
20416- " movq %%mm3, 24(%1)\n"
20417- " movq 32(%0), %%mm0\n"
20418- " movq 40(%0), %%mm1\n"
20419- " movq 48(%0), %%mm2\n"
20420- " movq 56(%0), %%mm3\n"
20421- " movq %%mm0, 32(%1)\n"
20422- " movq %%mm1, 40(%1)\n"
20423- " movq %%mm2, 48(%1)\n"
20424- " movq %%mm3, 56(%1)\n"
20425+ "1: prefetch 320(%1)\n"
20426+ "2: movq (%1), %%mm0\n"
20427+ " movq 8(%1), %%mm1\n"
20428+ " movq 16(%1), %%mm2\n"
20429+ " movq 24(%1), %%mm3\n"
20430+ " movq %%mm0, (%2)\n"
20431+ " movq %%mm1, 8(%2)\n"
20432+ " movq %%mm2, 16(%2)\n"
20433+ " movq %%mm3, 24(%2)\n"
20434+ " movq 32(%1), %%mm0\n"
20435+ " movq 40(%1), %%mm1\n"
20436+ " movq 48(%1), %%mm2\n"
20437+ " movq 56(%1), %%mm3\n"
20438+ " movq %%mm0, 32(%2)\n"
20439+ " movq %%mm1, 40(%2)\n"
20440+ " movq %%mm2, 48(%2)\n"
20441+ " movq %%mm3, 56(%2)\n"
20442 ".section .fixup, \"ax\"\n"
20443- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20444+ "3:\n"
20445+
20446+#ifdef CONFIG_PAX_KERNEXEC
20447+ " movl %%cr0, %0\n"
20448+ " movl %0, %%eax\n"
20449+ " andl $0xFFFEFFFF, %%eax\n"
20450+ " movl %%eax, %%cr0\n"
20451+#endif
20452+
20453+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20454+
20455+#ifdef CONFIG_PAX_KERNEXEC
20456+ " movl %0, %%cr0\n"
20457+#endif
20458+
20459 " jmp 2b\n"
20460 ".previous\n"
20461 _ASM_EXTABLE(1b, 3b)
20462- : : "r" (from), "r" (to) : "memory");
20463+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20464
20465 from += 64;
20466 to += 64;
20467@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20468 static void fast_copy_page(void *to, void *from)
20469 {
20470 int i;
20471+ unsigned long cr0;
20472
20473 kernel_fpu_begin();
20474
20475@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
20476 * but that is for later. -AV
20477 */
20478 __asm__ __volatile__(
20479- "1: prefetch (%0)\n"
20480- " prefetch 64(%0)\n"
20481- " prefetch 128(%0)\n"
20482- " prefetch 192(%0)\n"
20483- " prefetch 256(%0)\n"
20484+ "1: prefetch (%1)\n"
20485+ " prefetch 64(%1)\n"
20486+ " prefetch 128(%1)\n"
20487+ " prefetch 192(%1)\n"
20488+ " prefetch 256(%1)\n"
20489 "2: \n"
20490 ".section .fixup, \"ax\"\n"
20491- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20492+ "3: \n"
20493+
20494+#ifdef CONFIG_PAX_KERNEXEC
20495+ " movl %%cr0, %0\n"
20496+ " movl %0, %%eax\n"
20497+ " andl $0xFFFEFFFF, %%eax\n"
20498+ " movl %%eax, %%cr0\n"
20499+#endif
20500+
20501+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20502+
20503+#ifdef CONFIG_PAX_KERNEXEC
20504+ " movl %0, %%cr0\n"
20505+#endif
20506+
20507 " jmp 2b\n"
20508 ".previous\n"
20509- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20510+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20511
20512 for (i = 0; i < (4096-320)/64; i++) {
20513 __asm__ __volatile__ (
20514- "1: prefetch 320(%0)\n"
20515- "2: movq (%0), %%mm0\n"
20516- " movntq %%mm0, (%1)\n"
20517- " movq 8(%0), %%mm1\n"
20518- " movntq %%mm1, 8(%1)\n"
20519- " movq 16(%0), %%mm2\n"
20520- " movntq %%mm2, 16(%1)\n"
20521- " movq 24(%0), %%mm3\n"
20522- " movntq %%mm3, 24(%1)\n"
20523- " movq 32(%0), %%mm4\n"
20524- " movntq %%mm4, 32(%1)\n"
20525- " movq 40(%0), %%mm5\n"
20526- " movntq %%mm5, 40(%1)\n"
20527- " movq 48(%0), %%mm6\n"
20528- " movntq %%mm6, 48(%1)\n"
20529- " movq 56(%0), %%mm7\n"
20530- " movntq %%mm7, 56(%1)\n"
20531+ "1: prefetch 320(%1)\n"
20532+ "2: movq (%1), %%mm0\n"
20533+ " movntq %%mm0, (%2)\n"
20534+ " movq 8(%1), %%mm1\n"
20535+ " movntq %%mm1, 8(%2)\n"
20536+ " movq 16(%1), %%mm2\n"
20537+ " movntq %%mm2, 16(%2)\n"
20538+ " movq 24(%1), %%mm3\n"
20539+ " movntq %%mm3, 24(%2)\n"
20540+ " movq 32(%1), %%mm4\n"
20541+ " movntq %%mm4, 32(%2)\n"
20542+ " movq 40(%1), %%mm5\n"
20543+ " movntq %%mm5, 40(%2)\n"
20544+ " movq 48(%1), %%mm6\n"
20545+ " movntq %%mm6, 48(%2)\n"
20546+ " movq 56(%1), %%mm7\n"
20547+ " movntq %%mm7, 56(%2)\n"
20548 ".section .fixup, \"ax\"\n"
20549- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20550+ "3:\n"
20551+
20552+#ifdef CONFIG_PAX_KERNEXEC
20553+ " movl %%cr0, %0\n"
20554+ " movl %0, %%eax\n"
20555+ " andl $0xFFFEFFFF, %%eax\n"
20556+ " movl %%eax, %%cr0\n"
20557+#endif
20558+
20559+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20560+
20561+#ifdef CONFIG_PAX_KERNEXEC
20562+ " movl %0, %%cr0\n"
20563+#endif
20564+
20565 " jmp 2b\n"
20566 ".previous\n"
20567- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20568+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20569
20570 from += 64;
20571 to += 64;
20572@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20573 static void fast_copy_page(void *to, void *from)
20574 {
20575 int i;
20576+ unsigned long cr0;
20577
20578 kernel_fpu_begin();
20579
20580 __asm__ __volatile__ (
20581- "1: prefetch (%0)\n"
20582- " prefetch 64(%0)\n"
20583- " prefetch 128(%0)\n"
20584- " prefetch 192(%0)\n"
20585- " prefetch 256(%0)\n"
20586+ "1: prefetch (%1)\n"
20587+ " prefetch 64(%1)\n"
20588+ " prefetch 128(%1)\n"
20589+ " prefetch 192(%1)\n"
20590+ " prefetch 256(%1)\n"
20591 "2: \n"
20592 ".section .fixup, \"ax\"\n"
20593- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20594+ "3: \n"
20595+
20596+#ifdef CONFIG_PAX_KERNEXEC
20597+ " movl %%cr0, %0\n"
20598+ " movl %0, %%eax\n"
20599+ " andl $0xFFFEFFFF, %%eax\n"
20600+ " movl %%eax, %%cr0\n"
20601+#endif
20602+
20603+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20604+
20605+#ifdef CONFIG_PAX_KERNEXEC
20606+ " movl %0, %%cr0\n"
20607+#endif
20608+
20609 " jmp 2b\n"
20610 ".previous\n"
20611- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20612+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20613
20614 for (i = 0; i < 4096/64; i++) {
20615 __asm__ __volatile__ (
20616- "1: prefetch 320(%0)\n"
20617- "2: movq (%0), %%mm0\n"
20618- " movq 8(%0), %%mm1\n"
20619- " movq 16(%0), %%mm2\n"
20620- " movq 24(%0), %%mm3\n"
20621- " movq %%mm0, (%1)\n"
20622- " movq %%mm1, 8(%1)\n"
20623- " movq %%mm2, 16(%1)\n"
20624- " movq %%mm3, 24(%1)\n"
20625- " movq 32(%0), %%mm0\n"
20626- " movq 40(%0), %%mm1\n"
20627- " movq 48(%0), %%mm2\n"
20628- " movq 56(%0), %%mm3\n"
20629- " movq %%mm0, 32(%1)\n"
20630- " movq %%mm1, 40(%1)\n"
20631- " movq %%mm2, 48(%1)\n"
20632- " movq %%mm3, 56(%1)\n"
20633+ "1: prefetch 320(%1)\n"
20634+ "2: movq (%1), %%mm0\n"
20635+ " movq 8(%1), %%mm1\n"
20636+ " movq 16(%1), %%mm2\n"
20637+ " movq 24(%1), %%mm3\n"
20638+ " movq %%mm0, (%2)\n"
20639+ " movq %%mm1, 8(%2)\n"
20640+ " movq %%mm2, 16(%2)\n"
20641+ " movq %%mm3, 24(%2)\n"
20642+ " movq 32(%1), %%mm0\n"
20643+ " movq 40(%1), %%mm1\n"
20644+ " movq 48(%1), %%mm2\n"
20645+ " movq 56(%1), %%mm3\n"
20646+ " movq %%mm0, 32(%2)\n"
20647+ " movq %%mm1, 40(%2)\n"
20648+ " movq %%mm2, 48(%2)\n"
20649+ " movq %%mm3, 56(%2)\n"
20650 ".section .fixup, \"ax\"\n"
20651- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20652+ "3:\n"
20653+
20654+#ifdef CONFIG_PAX_KERNEXEC
20655+ " movl %%cr0, %0\n"
20656+ " movl %0, %%eax\n"
20657+ " andl $0xFFFEFFFF, %%eax\n"
20658+ " movl %%eax, %%cr0\n"
20659+#endif
20660+
20661+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20662+
20663+#ifdef CONFIG_PAX_KERNEXEC
20664+ " movl %0, %%cr0\n"
20665+#endif
20666+
20667 " jmp 2b\n"
20668 ".previous\n"
20669 _ASM_EXTABLE(1b, 3b)
20670- : : "r" (from), "r" (to) : "memory");
20671+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20672
20673 from += 64;
20674 to += 64;
20675diff -urNp linux-2.6.32.48/arch/x86/lib/msr-reg.S linux-2.6.32.48/arch/x86/lib/msr-reg.S
20676--- linux-2.6.32.48/arch/x86/lib/msr-reg.S 2011-11-08 19:02:43.000000000 -0500
20677+++ linux-2.6.32.48/arch/x86/lib/msr-reg.S 2011-11-15 19:59:43.000000000 -0500
20678@@ -3,6 +3,7 @@
20679 #include <asm/dwarf2.h>
20680 #include <asm/asm.h>
20681 #include <asm/msr.h>
20682+#include <asm/alternative-asm.h>
20683
20684 #ifdef CONFIG_X86_64
20685 /*
20686@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
20687 movl %edi, 28(%r10)
20688 popq_cfi %rbp
20689 popq_cfi %rbx
20690+ pax_force_retaddr
20691 ret
20692 3:
20693 CFI_RESTORE_STATE
20694diff -urNp linux-2.6.32.48/arch/x86/lib/putuser.S linux-2.6.32.48/arch/x86/lib/putuser.S
20695--- linux-2.6.32.48/arch/x86/lib/putuser.S 2011-11-08 19:02:43.000000000 -0500
20696+++ linux-2.6.32.48/arch/x86/lib/putuser.S 2011-11-15 19:59:43.000000000 -0500
20697@@ -15,7 +15,9 @@
20698 #include <asm/thread_info.h>
20699 #include <asm/errno.h>
20700 #include <asm/asm.h>
20701-
20702+#include <asm/segment.h>
20703+#include <asm/pgtable.h>
20704+#include <asm/alternative-asm.h>
20705
20706 /*
20707 * __put_user_X
20708@@ -29,52 +31,119 @@
20709 * as they get called from within inline assembly.
20710 */
20711
20712-#define ENTER CFI_STARTPROC ; \
20713- GET_THREAD_INFO(%_ASM_BX)
20714-#define EXIT ret ; \
20715+#define ENTER CFI_STARTPROC
20716+#define EXIT pax_force_retaddr; ret ; \
20717 CFI_ENDPROC
20718
20719+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20720+#define _DEST %_ASM_CX,%_ASM_BX
20721+#else
20722+#define _DEST %_ASM_CX
20723+#endif
20724+
20725+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20726+#define __copyuser_seg gs;
20727+#else
20728+#define __copyuser_seg
20729+#endif
20730+
20731 .text
20732 ENTRY(__put_user_1)
20733 ENTER
20734+
20735+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20736+ GET_THREAD_INFO(%_ASM_BX)
20737 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20738 jae bad_put_user
20739-1: movb %al,(%_ASM_CX)
20740+
20741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20742+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20743+ cmp %_ASM_BX,%_ASM_CX
20744+ jb 1234f
20745+ xor %ebx,%ebx
20746+1234:
20747+#endif
20748+
20749+#endif
20750+
20751+1: __copyuser_seg movb %al,(_DEST)
20752 xor %eax,%eax
20753 EXIT
20754 ENDPROC(__put_user_1)
20755
20756 ENTRY(__put_user_2)
20757 ENTER
20758+
20759+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20760+ GET_THREAD_INFO(%_ASM_BX)
20761 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20762 sub $1,%_ASM_BX
20763 cmp %_ASM_BX,%_ASM_CX
20764 jae bad_put_user
20765-2: movw %ax,(%_ASM_CX)
20766+
20767+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20768+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20769+ cmp %_ASM_BX,%_ASM_CX
20770+ jb 1234f
20771+ xor %ebx,%ebx
20772+1234:
20773+#endif
20774+
20775+#endif
20776+
20777+2: __copyuser_seg movw %ax,(_DEST)
20778 xor %eax,%eax
20779 EXIT
20780 ENDPROC(__put_user_2)
20781
20782 ENTRY(__put_user_4)
20783 ENTER
20784+
20785+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20786+ GET_THREAD_INFO(%_ASM_BX)
20787 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20788 sub $3,%_ASM_BX
20789 cmp %_ASM_BX,%_ASM_CX
20790 jae bad_put_user
20791-3: movl %eax,(%_ASM_CX)
20792+
20793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20794+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20795+ cmp %_ASM_BX,%_ASM_CX
20796+ jb 1234f
20797+ xor %ebx,%ebx
20798+1234:
20799+#endif
20800+
20801+#endif
20802+
20803+3: __copyuser_seg movl %eax,(_DEST)
20804 xor %eax,%eax
20805 EXIT
20806 ENDPROC(__put_user_4)
20807
20808 ENTRY(__put_user_8)
20809 ENTER
20810+
20811+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20812+ GET_THREAD_INFO(%_ASM_BX)
20813 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20814 sub $7,%_ASM_BX
20815 cmp %_ASM_BX,%_ASM_CX
20816 jae bad_put_user
20817-4: mov %_ASM_AX,(%_ASM_CX)
20818+
20819+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20820+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20821+ cmp %_ASM_BX,%_ASM_CX
20822+ jb 1234f
20823+ xor %ebx,%ebx
20824+1234:
20825+#endif
20826+
20827+#endif
20828+
20829+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20830 #ifdef CONFIG_X86_32
20831-5: movl %edx,4(%_ASM_CX)
20832+5: __copyuser_seg movl %edx,4(_DEST)
20833 #endif
20834 xor %eax,%eax
20835 EXIT
20836diff -urNp linux-2.6.32.48/arch/x86/lib/rwlock_64.S linux-2.6.32.48/arch/x86/lib/rwlock_64.S
20837--- linux-2.6.32.48/arch/x86/lib/rwlock_64.S 2011-11-08 19:02:43.000000000 -0500
20838+++ linux-2.6.32.48/arch/x86/lib/rwlock_64.S 2011-11-15 19:59:43.000000000 -0500
20839@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
20840 LOCK_PREFIX
20841 subl $RW_LOCK_BIAS,(%rdi)
20842 jnz __write_lock_failed
20843+ pax_force_retaddr
20844 ret
20845 CFI_ENDPROC
20846 END(__write_lock_failed)
20847@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
20848 LOCK_PREFIX
20849 decl (%rdi)
20850 js __read_lock_failed
20851+ pax_force_retaddr
20852 ret
20853 CFI_ENDPROC
20854 END(__read_lock_failed)
20855diff -urNp linux-2.6.32.48/arch/x86/lib/rwsem_64.S linux-2.6.32.48/arch/x86/lib/rwsem_64.S
20856--- linux-2.6.32.48/arch/x86/lib/rwsem_64.S 2011-11-08 19:02:43.000000000 -0500
20857+++ linux-2.6.32.48/arch/x86/lib/rwsem_64.S 2011-11-15 19:59:43.000000000 -0500
20858@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
20859 call rwsem_down_read_failed
20860 popq %rdx
20861 restore_common_regs
20862+ pax_force_retaddr
20863 ret
20864 ENDPROC(call_rwsem_down_read_failed)
20865
20866@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
20867 movq %rax,%rdi
20868 call rwsem_down_write_failed
20869 restore_common_regs
20870+ pax_force_retaddr
20871 ret
20872 ENDPROC(call_rwsem_down_write_failed)
20873
20874@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
20875 movq %rax,%rdi
20876 call rwsem_wake
20877 restore_common_regs
20878-1: ret
20879+1: pax_force_retaddr
20880+ ret
20881 ENDPROC(call_rwsem_wake)
20882
20883 /* Fix up special calling conventions */
20884@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
20885 call rwsem_downgrade_wake
20886 popq %rdx
20887 restore_common_regs
20888+ pax_force_retaddr
20889 ret
20890 ENDPROC(call_rwsem_downgrade_wake)
20891diff -urNp linux-2.6.32.48/arch/x86/lib/thunk_64.S linux-2.6.32.48/arch/x86/lib/thunk_64.S
20892--- linux-2.6.32.48/arch/x86/lib/thunk_64.S 2011-11-08 19:02:43.000000000 -0500
20893+++ linux-2.6.32.48/arch/x86/lib/thunk_64.S 2011-11-15 19:59:43.000000000 -0500
20894@@ -10,7 +10,8 @@
20895 #include <asm/dwarf2.h>
20896 #include <asm/calling.h>
20897 #include <asm/rwlock.h>
20898-
20899+ #include <asm/alternative-asm.h>
20900+
20901 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20902 .macro thunk name,func
20903 .globl \name
20904@@ -70,6 +71,7 @@
20905 SAVE_ARGS
20906 restore:
20907 RESTORE_ARGS
20908+ pax_force_retaddr
20909 ret
20910 CFI_ENDPROC
20911
20912@@ -77,5 +79,6 @@ restore:
20913 SAVE_ARGS
20914 restore_norax:
20915 RESTORE_ARGS 1
20916+ pax_force_retaddr
20917 ret
20918 CFI_ENDPROC
20919diff -urNp linux-2.6.32.48/arch/x86/lib/usercopy_32.c linux-2.6.32.48/arch/x86/lib/usercopy_32.c
20920--- linux-2.6.32.48/arch/x86/lib/usercopy_32.c 2011-11-08 19:02:43.000000000 -0500
20921+++ linux-2.6.32.48/arch/x86/lib/usercopy_32.c 2011-11-15 19:59:43.000000000 -0500
20922@@ -43,7 +43,7 @@ do { \
20923 __asm__ __volatile__( \
20924 " testl %1,%1\n" \
20925 " jz 2f\n" \
20926- "0: lodsb\n" \
20927+ "0: "__copyuser_seg"lodsb\n" \
20928 " stosb\n" \
20929 " testb %%al,%%al\n" \
20930 " jz 1f\n" \
20931@@ -128,10 +128,12 @@ do { \
20932 int __d0; \
20933 might_fault(); \
20934 __asm__ __volatile__( \
20935+ __COPYUSER_SET_ES \
20936 "0: rep; stosl\n" \
20937 " movl %2,%0\n" \
20938 "1: rep; stosb\n" \
20939 "2:\n" \
20940+ __COPYUSER_RESTORE_ES \
20941 ".section .fixup,\"ax\"\n" \
20942 "3: lea 0(%2,%0,4),%0\n" \
20943 " jmp 2b\n" \
20944@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20945 might_fault();
20946
20947 __asm__ __volatile__(
20948+ __COPYUSER_SET_ES
20949 " testl %0, %0\n"
20950 " jz 3f\n"
20951 " andl %0,%%ecx\n"
20952@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20953 " subl %%ecx,%0\n"
20954 " addl %0,%%eax\n"
20955 "1:\n"
20956+ __COPYUSER_RESTORE_ES
20957 ".section .fixup,\"ax\"\n"
20958 "2: xorl %%eax,%%eax\n"
20959 " jmp 1b\n"
20960@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20961
20962 #ifdef CONFIG_X86_INTEL_USERCOPY
20963 static unsigned long
20964-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20965+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20966 {
20967 int d0, d1;
20968 __asm__ __volatile__(
20969@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20970 " .align 2,0x90\n"
20971 "3: movl 0(%4), %%eax\n"
20972 "4: movl 4(%4), %%edx\n"
20973- "5: movl %%eax, 0(%3)\n"
20974- "6: movl %%edx, 4(%3)\n"
20975+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20976+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20977 "7: movl 8(%4), %%eax\n"
20978 "8: movl 12(%4),%%edx\n"
20979- "9: movl %%eax, 8(%3)\n"
20980- "10: movl %%edx, 12(%3)\n"
20981+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20982+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20983 "11: movl 16(%4), %%eax\n"
20984 "12: movl 20(%4), %%edx\n"
20985- "13: movl %%eax, 16(%3)\n"
20986- "14: movl %%edx, 20(%3)\n"
20987+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20988+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20989 "15: movl 24(%4), %%eax\n"
20990 "16: movl 28(%4), %%edx\n"
20991- "17: movl %%eax, 24(%3)\n"
20992- "18: movl %%edx, 28(%3)\n"
20993+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20994+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20995 "19: movl 32(%4), %%eax\n"
20996 "20: movl 36(%4), %%edx\n"
20997- "21: movl %%eax, 32(%3)\n"
20998- "22: movl %%edx, 36(%3)\n"
20999+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21000+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21001 "23: movl 40(%4), %%eax\n"
21002 "24: movl 44(%4), %%edx\n"
21003- "25: movl %%eax, 40(%3)\n"
21004- "26: movl %%edx, 44(%3)\n"
21005+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21006+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21007 "27: movl 48(%4), %%eax\n"
21008 "28: movl 52(%4), %%edx\n"
21009- "29: movl %%eax, 48(%3)\n"
21010- "30: movl %%edx, 52(%3)\n"
21011+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21012+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21013 "31: movl 56(%4), %%eax\n"
21014 "32: movl 60(%4), %%edx\n"
21015- "33: movl %%eax, 56(%3)\n"
21016- "34: movl %%edx, 60(%3)\n"
21017+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21018+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21019 " addl $-64, %0\n"
21020 " addl $64, %4\n"
21021 " addl $64, %3\n"
21022@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
21023 " shrl $2, %0\n"
21024 " andl $3, %%eax\n"
21025 " cld\n"
21026+ __COPYUSER_SET_ES
21027 "99: rep; movsl\n"
21028 "36: movl %%eax, %0\n"
21029 "37: rep; movsb\n"
21030 "100:\n"
21031+ __COPYUSER_RESTORE_ES
21032+ ".section .fixup,\"ax\"\n"
21033+ "101: lea 0(%%eax,%0,4),%0\n"
21034+ " jmp 100b\n"
21035+ ".previous\n"
21036+ ".section __ex_table,\"a\"\n"
21037+ " .align 4\n"
21038+ " .long 1b,100b\n"
21039+ " .long 2b,100b\n"
21040+ " .long 3b,100b\n"
21041+ " .long 4b,100b\n"
21042+ " .long 5b,100b\n"
21043+ " .long 6b,100b\n"
21044+ " .long 7b,100b\n"
21045+ " .long 8b,100b\n"
21046+ " .long 9b,100b\n"
21047+ " .long 10b,100b\n"
21048+ " .long 11b,100b\n"
21049+ " .long 12b,100b\n"
21050+ " .long 13b,100b\n"
21051+ " .long 14b,100b\n"
21052+ " .long 15b,100b\n"
21053+ " .long 16b,100b\n"
21054+ " .long 17b,100b\n"
21055+ " .long 18b,100b\n"
21056+ " .long 19b,100b\n"
21057+ " .long 20b,100b\n"
21058+ " .long 21b,100b\n"
21059+ " .long 22b,100b\n"
21060+ " .long 23b,100b\n"
21061+ " .long 24b,100b\n"
21062+ " .long 25b,100b\n"
21063+ " .long 26b,100b\n"
21064+ " .long 27b,100b\n"
21065+ " .long 28b,100b\n"
21066+ " .long 29b,100b\n"
21067+ " .long 30b,100b\n"
21068+ " .long 31b,100b\n"
21069+ " .long 32b,100b\n"
21070+ " .long 33b,100b\n"
21071+ " .long 34b,100b\n"
21072+ " .long 35b,100b\n"
21073+ " .long 36b,100b\n"
21074+ " .long 37b,100b\n"
21075+ " .long 99b,101b\n"
21076+ ".previous"
21077+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
21078+ : "1"(to), "2"(from), "0"(size)
21079+ : "eax", "edx", "memory");
21080+ return size;
21081+}
21082+
21083+static unsigned long
21084+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21085+{
21086+ int d0, d1;
21087+ __asm__ __volatile__(
21088+ " .align 2,0x90\n"
21089+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21090+ " cmpl $67, %0\n"
21091+ " jbe 3f\n"
21092+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21093+ " .align 2,0x90\n"
21094+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21095+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21096+ "5: movl %%eax, 0(%3)\n"
21097+ "6: movl %%edx, 4(%3)\n"
21098+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21099+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21100+ "9: movl %%eax, 8(%3)\n"
21101+ "10: movl %%edx, 12(%3)\n"
21102+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21103+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21104+ "13: movl %%eax, 16(%3)\n"
21105+ "14: movl %%edx, 20(%3)\n"
21106+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21107+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21108+ "17: movl %%eax, 24(%3)\n"
21109+ "18: movl %%edx, 28(%3)\n"
21110+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21111+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21112+ "21: movl %%eax, 32(%3)\n"
21113+ "22: movl %%edx, 36(%3)\n"
21114+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21115+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21116+ "25: movl %%eax, 40(%3)\n"
21117+ "26: movl %%edx, 44(%3)\n"
21118+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21119+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21120+ "29: movl %%eax, 48(%3)\n"
21121+ "30: movl %%edx, 52(%3)\n"
21122+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21123+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21124+ "33: movl %%eax, 56(%3)\n"
21125+ "34: movl %%edx, 60(%3)\n"
21126+ " addl $-64, %0\n"
21127+ " addl $64, %4\n"
21128+ " addl $64, %3\n"
21129+ " cmpl $63, %0\n"
21130+ " ja 1b\n"
21131+ "35: movl %0, %%eax\n"
21132+ " shrl $2, %0\n"
21133+ " andl $3, %%eax\n"
21134+ " cld\n"
21135+ "99: rep; "__copyuser_seg" movsl\n"
21136+ "36: movl %%eax, %0\n"
21137+ "37: rep; "__copyuser_seg" movsb\n"
21138+ "100:\n"
21139 ".section .fixup,\"ax\"\n"
21140 "101: lea 0(%%eax,%0,4),%0\n"
21141 " jmp 100b\n"
21142@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
21143 int d0, d1;
21144 __asm__ __volatile__(
21145 " .align 2,0x90\n"
21146- "0: movl 32(%4), %%eax\n"
21147+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21148 " cmpl $67, %0\n"
21149 " jbe 2f\n"
21150- "1: movl 64(%4), %%eax\n"
21151+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21152 " .align 2,0x90\n"
21153- "2: movl 0(%4), %%eax\n"
21154- "21: movl 4(%4), %%edx\n"
21155+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21156+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21157 " movl %%eax, 0(%3)\n"
21158 " movl %%edx, 4(%3)\n"
21159- "3: movl 8(%4), %%eax\n"
21160- "31: movl 12(%4),%%edx\n"
21161+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21162+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21163 " movl %%eax, 8(%3)\n"
21164 " movl %%edx, 12(%3)\n"
21165- "4: movl 16(%4), %%eax\n"
21166- "41: movl 20(%4), %%edx\n"
21167+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21168+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21169 " movl %%eax, 16(%3)\n"
21170 " movl %%edx, 20(%3)\n"
21171- "10: movl 24(%4), %%eax\n"
21172- "51: movl 28(%4), %%edx\n"
21173+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21174+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21175 " movl %%eax, 24(%3)\n"
21176 " movl %%edx, 28(%3)\n"
21177- "11: movl 32(%4), %%eax\n"
21178- "61: movl 36(%4), %%edx\n"
21179+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21180+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21181 " movl %%eax, 32(%3)\n"
21182 " movl %%edx, 36(%3)\n"
21183- "12: movl 40(%4), %%eax\n"
21184- "71: movl 44(%4), %%edx\n"
21185+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21186+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21187 " movl %%eax, 40(%3)\n"
21188 " movl %%edx, 44(%3)\n"
21189- "13: movl 48(%4), %%eax\n"
21190- "81: movl 52(%4), %%edx\n"
21191+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21192+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21193 " movl %%eax, 48(%3)\n"
21194 " movl %%edx, 52(%3)\n"
21195- "14: movl 56(%4), %%eax\n"
21196- "91: movl 60(%4), %%edx\n"
21197+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21198+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21199 " movl %%eax, 56(%3)\n"
21200 " movl %%edx, 60(%3)\n"
21201 " addl $-64, %0\n"
21202@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
21203 " shrl $2, %0\n"
21204 " andl $3, %%eax\n"
21205 " cld\n"
21206- "6: rep; movsl\n"
21207+ "6: rep; "__copyuser_seg" movsl\n"
21208 " movl %%eax,%0\n"
21209- "7: rep; movsb\n"
21210+ "7: rep; "__copyuser_seg" movsb\n"
21211 "8:\n"
21212 ".section .fixup,\"ax\"\n"
21213 "9: lea 0(%%eax,%0,4),%0\n"
21214@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
21215
21216 __asm__ __volatile__(
21217 " .align 2,0x90\n"
21218- "0: movl 32(%4), %%eax\n"
21219+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21220 " cmpl $67, %0\n"
21221 " jbe 2f\n"
21222- "1: movl 64(%4), %%eax\n"
21223+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21224 " .align 2,0x90\n"
21225- "2: movl 0(%4), %%eax\n"
21226- "21: movl 4(%4), %%edx\n"
21227+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21228+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21229 " movnti %%eax, 0(%3)\n"
21230 " movnti %%edx, 4(%3)\n"
21231- "3: movl 8(%4), %%eax\n"
21232- "31: movl 12(%4),%%edx\n"
21233+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21234+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21235 " movnti %%eax, 8(%3)\n"
21236 " movnti %%edx, 12(%3)\n"
21237- "4: movl 16(%4), %%eax\n"
21238- "41: movl 20(%4), %%edx\n"
21239+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21240+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21241 " movnti %%eax, 16(%3)\n"
21242 " movnti %%edx, 20(%3)\n"
21243- "10: movl 24(%4), %%eax\n"
21244- "51: movl 28(%4), %%edx\n"
21245+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21246+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21247 " movnti %%eax, 24(%3)\n"
21248 " movnti %%edx, 28(%3)\n"
21249- "11: movl 32(%4), %%eax\n"
21250- "61: movl 36(%4), %%edx\n"
21251+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21252+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21253 " movnti %%eax, 32(%3)\n"
21254 " movnti %%edx, 36(%3)\n"
21255- "12: movl 40(%4), %%eax\n"
21256- "71: movl 44(%4), %%edx\n"
21257+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21258+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21259 " movnti %%eax, 40(%3)\n"
21260 " movnti %%edx, 44(%3)\n"
21261- "13: movl 48(%4), %%eax\n"
21262- "81: movl 52(%4), %%edx\n"
21263+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21264+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21265 " movnti %%eax, 48(%3)\n"
21266 " movnti %%edx, 52(%3)\n"
21267- "14: movl 56(%4), %%eax\n"
21268- "91: movl 60(%4), %%edx\n"
21269+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21270+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21271 " movnti %%eax, 56(%3)\n"
21272 " movnti %%edx, 60(%3)\n"
21273 " addl $-64, %0\n"
21274@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
21275 " shrl $2, %0\n"
21276 " andl $3, %%eax\n"
21277 " cld\n"
21278- "6: rep; movsl\n"
21279+ "6: rep; "__copyuser_seg" movsl\n"
21280 " movl %%eax,%0\n"
21281- "7: rep; movsb\n"
21282+ "7: rep; "__copyuser_seg" movsb\n"
21283 "8:\n"
21284 ".section .fixup,\"ax\"\n"
21285 "9: lea 0(%%eax,%0,4),%0\n"
21286@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
21287
21288 __asm__ __volatile__(
21289 " .align 2,0x90\n"
21290- "0: movl 32(%4), %%eax\n"
21291+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21292 " cmpl $67, %0\n"
21293 " jbe 2f\n"
21294- "1: movl 64(%4), %%eax\n"
21295+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21296 " .align 2,0x90\n"
21297- "2: movl 0(%4), %%eax\n"
21298- "21: movl 4(%4), %%edx\n"
21299+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21300+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21301 " movnti %%eax, 0(%3)\n"
21302 " movnti %%edx, 4(%3)\n"
21303- "3: movl 8(%4), %%eax\n"
21304- "31: movl 12(%4),%%edx\n"
21305+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21306+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21307 " movnti %%eax, 8(%3)\n"
21308 " movnti %%edx, 12(%3)\n"
21309- "4: movl 16(%4), %%eax\n"
21310- "41: movl 20(%4), %%edx\n"
21311+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21312+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21313 " movnti %%eax, 16(%3)\n"
21314 " movnti %%edx, 20(%3)\n"
21315- "10: movl 24(%4), %%eax\n"
21316- "51: movl 28(%4), %%edx\n"
21317+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21318+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21319 " movnti %%eax, 24(%3)\n"
21320 " movnti %%edx, 28(%3)\n"
21321- "11: movl 32(%4), %%eax\n"
21322- "61: movl 36(%4), %%edx\n"
21323+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21324+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21325 " movnti %%eax, 32(%3)\n"
21326 " movnti %%edx, 36(%3)\n"
21327- "12: movl 40(%4), %%eax\n"
21328- "71: movl 44(%4), %%edx\n"
21329+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21330+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21331 " movnti %%eax, 40(%3)\n"
21332 " movnti %%edx, 44(%3)\n"
21333- "13: movl 48(%4), %%eax\n"
21334- "81: movl 52(%4), %%edx\n"
21335+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21336+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21337 " movnti %%eax, 48(%3)\n"
21338 " movnti %%edx, 52(%3)\n"
21339- "14: movl 56(%4), %%eax\n"
21340- "91: movl 60(%4), %%edx\n"
21341+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21342+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21343 " movnti %%eax, 56(%3)\n"
21344 " movnti %%edx, 60(%3)\n"
21345 " addl $-64, %0\n"
21346@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
21347 " shrl $2, %0\n"
21348 " andl $3, %%eax\n"
21349 " cld\n"
21350- "6: rep; movsl\n"
21351+ "6: rep; "__copyuser_seg" movsl\n"
21352 " movl %%eax,%0\n"
21353- "7: rep; movsb\n"
21354+ "7: rep; "__copyuser_seg" movsb\n"
21355 "8:\n"
21356 ".section .fixup,\"ax\"\n"
21357 "9: lea 0(%%eax,%0,4),%0\n"
21358@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
21359 */
21360 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21361 unsigned long size);
21362-unsigned long __copy_user_intel(void __user *to, const void *from,
21363+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21364+ unsigned long size);
21365+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21366 unsigned long size);
21367 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21368 const void __user *from, unsigned long size);
21369 #endif /* CONFIG_X86_INTEL_USERCOPY */
21370
21371 /* Generic arbitrary sized copy. */
21372-#define __copy_user(to, from, size) \
21373+#define __copy_user(to, from, size, prefix, set, restore) \
21374 do { \
21375 int __d0, __d1, __d2; \
21376 __asm__ __volatile__( \
21377+ set \
21378 " cmp $7,%0\n" \
21379 " jbe 1f\n" \
21380 " movl %1,%0\n" \
21381 " negl %0\n" \
21382 " andl $7,%0\n" \
21383 " subl %0,%3\n" \
21384- "4: rep; movsb\n" \
21385+ "4: rep; "prefix"movsb\n" \
21386 " movl %3,%0\n" \
21387 " shrl $2,%0\n" \
21388 " andl $3,%3\n" \
21389 " .align 2,0x90\n" \
21390- "0: rep; movsl\n" \
21391+ "0: rep; "prefix"movsl\n" \
21392 " movl %3,%0\n" \
21393- "1: rep; movsb\n" \
21394+ "1: rep; "prefix"movsb\n" \
21395 "2:\n" \
21396+ restore \
21397 ".section .fixup,\"ax\"\n" \
21398 "5: addl %3,%0\n" \
21399 " jmp 2b\n" \
21400@@ -682,14 +799,14 @@ do { \
21401 " negl %0\n" \
21402 " andl $7,%0\n" \
21403 " subl %0,%3\n" \
21404- "4: rep; movsb\n" \
21405+ "4: rep; "__copyuser_seg"movsb\n" \
21406 " movl %3,%0\n" \
21407 " shrl $2,%0\n" \
21408 " andl $3,%3\n" \
21409 " .align 2,0x90\n" \
21410- "0: rep; movsl\n" \
21411+ "0: rep; "__copyuser_seg"movsl\n" \
21412 " movl %3,%0\n" \
21413- "1: rep; movsb\n" \
21414+ "1: rep; "__copyuser_seg"movsb\n" \
21415 "2:\n" \
21416 ".section .fixup,\"ax\"\n" \
21417 "5: addl %3,%0\n" \
21418@@ -775,9 +892,9 @@ survive:
21419 }
21420 #endif
21421 if (movsl_is_ok(to, from, n))
21422- __copy_user(to, from, n);
21423+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21424 else
21425- n = __copy_user_intel(to, from, n);
21426+ n = __generic_copy_to_user_intel(to, from, n);
21427 return n;
21428 }
21429 EXPORT_SYMBOL(__copy_to_user_ll);
21430@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
21431 unsigned long n)
21432 {
21433 if (movsl_is_ok(to, from, n))
21434- __copy_user(to, from, n);
21435+ __copy_user(to, from, n, __copyuser_seg, "", "");
21436 else
21437- n = __copy_user_intel((void __user *)to,
21438- (const void *)from, n);
21439+ n = __generic_copy_from_user_intel(to, from, n);
21440 return n;
21441 }
21442 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21443@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
21444 if (n > 64 && cpu_has_xmm2)
21445 n = __copy_user_intel_nocache(to, from, n);
21446 else
21447- __copy_user(to, from, n);
21448+ __copy_user(to, from, n, __copyuser_seg, "", "");
21449 #else
21450- __copy_user(to, from, n);
21451+ __copy_user(to, from, n, __copyuser_seg, "", "");
21452 #endif
21453 return n;
21454 }
21455 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21456
21457-/**
21458- * copy_to_user: - Copy a block of data into user space.
21459- * @to: Destination address, in user space.
21460- * @from: Source address, in kernel space.
21461- * @n: Number of bytes to copy.
21462- *
21463- * Context: User context only. This function may sleep.
21464- *
21465- * Copy data from kernel space to user space.
21466- *
21467- * Returns number of bytes that could not be copied.
21468- * On success, this will be zero.
21469- */
21470-unsigned long
21471-copy_to_user(void __user *to, const void *from, unsigned long n)
21472+#ifdef CONFIG_PAX_MEMORY_UDEREF
21473+void __set_fs(mm_segment_t x)
21474 {
21475- if (access_ok(VERIFY_WRITE, to, n))
21476- n = __copy_to_user(to, from, n);
21477- return n;
21478+ switch (x.seg) {
21479+ case 0:
21480+ loadsegment(gs, 0);
21481+ break;
21482+ case TASK_SIZE_MAX:
21483+ loadsegment(gs, __USER_DS);
21484+ break;
21485+ case -1UL:
21486+ loadsegment(gs, __KERNEL_DS);
21487+ break;
21488+ default:
21489+ BUG();
21490+ }
21491+ return;
21492 }
21493-EXPORT_SYMBOL(copy_to_user);
21494+EXPORT_SYMBOL(__set_fs);
21495
21496-/**
21497- * copy_from_user: - Copy a block of data from user space.
21498- * @to: Destination address, in kernel space.
21499- * @from: Source address, in user space.
21500- * @n: Number of bytes to copy.
21501- *
21502- * Context: User context only. This function may sleep.
21503- *
21504- * Copy data from user space to kernel space.
21505- *
21506- * Returns number of bytes that could not be copied.
21507- * On success, this will be zero.
21508- *
21509- * If some data could not be copied, this function will pad the copied
21510- * data to the requested size using zero bytes.
21511- */
21512-unsigned long
21513-copy_from_user(void *to, const void __user *from, unsigned long n)
21514+void set_fs(mm_segment_t x)
21515 {
21516- if (access_ok(VERIFY_READ, from, n))
21517- n = __copy_from_user(to, from, n);
21518- else
21519- memset(to, 0, n);
21520- return n;
21521+ current_thread_info()->addr_limit = x;
21522+ __set_fs(x);
21523 }
21524-EXPORT_SYMBOL(copy_from_user);
21525+EXPORT_SYMBOL(set_fs);
21526+#endif
21527diff -urNp linux-2.6.32.48/arch/x86/lib/usercopy_64.c linux-2.6.32.48/arch/x86/lib/usercopy_64.c
21528--- linux-2.6.32.48/arch/x86/lib/usercopy_64.c 2011-11-08 19:02:43.000000000 -0500
21529+++ linux-2.6.32.48/arch/x86/lib/usercopy_64.c 2011-11-15 19:59:43.000000000 -0500
21530@@ -42,6 +42,12 @@ long
21531 __strncpy_from_user(char *dst, const char __user *src, long count)
21532 {
21533 long res;
21534+
21535+#ifdef CONFIG_PAX_MEMORY_UDEREF
21536+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21537+ src += PAX_USER_SHADOW_BASE;
21538+#endif
21539+
21540 __do_strncpy_from_user(dst, src, count, res);
21541 return res;
21542 }
21543@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
21544 {
21545 long __d0;
21546 might_fault();
21547+
21548+#ifdef CONFIG_PAX_MEMORY_UDEREF
21549+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21550+ addr += PAX_USER_SHADOW_BASE;
21551+#endif
21552+
21553 /* no memory constraint because it doesn't change any memory gcc knows
21554 about */
21555 asm volatile(
21556@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
21557
21558 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21559 {
21560- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21561- return copy_user_generic((__force void *)to, (__force void *)from, len);
21562- }
21563- return len;
21564+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21565+
21566+#ifdef CONFIG_PAX_MEMORY_UDEREF
21567+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21568+ to += PAX_USER_SHADOW_BASE;
21569+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21570+ from += PAX_USER_SHADOW_BASE;
21571+#endif
21572+
21573+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21574+ }
21575+ return len;
21576 }
21577 EXPORT_SYMBOL(copy_in_user);
21578
21579@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21580 * it is not necessary to optimize tail handling.
21581 */
21582 unsigned long
21583-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21584+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
21585 {
21586 char c;
21587 unsigned zero_len;
21588diff -urNp linux-2.6.32.48/arch/x86/Makefile linux-2.6.32.48/arch/x86/Makefile
21589--- linux-2.6.32.48/arch/x86/Makefile 2011-11-08 19:02:43.000000000 -0500
21590+++ linux-2.6.32.48/arch/x86/Makefile 2011-11-15 19:59:43.000000000 -0500
21591@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
21592 else
21593 BITS := 64
21594 UTS_MACHINE := x86_64
21595+ biarch := $(call cc-option,-m64)
21596 CHECKFLAGS += -D__x86_64__ -m64
21597
21598 KBUILD_AFLAGS += -m64
21599@@ -189,3 +190,12 @@ define archhelp
21600 echo ' FDARGS="..." arguments for the booted kernel'
21601 echo ' FDINITRD=file initrd for the booted kernel'
21602 endef
21603+
21604+define OLD_LD
21605+
21606+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
21607+*** Please upgrade your binutils to 2.18 or newer
21608+endef
21609+
21610+archprepare:
21611+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
21612diff -urNp linux-2.6.32.48/arch/x86/mm/extable.c linux-2.6.32.48/arch/x86/mm/extable.c
21613--- linux-2.6.32.48/arch/x86/mm/extable.c 2011-11-08 19:02:43.000000000 -0500
21614+++ linux-2.6.32.48/arch/x86/mm/extable.c 2011-11-15 19:59:43.000000000 -0500
21615@@ -1,14 +1,71 @@
21616 #include <linux/module.h>
21617 #include <linux/spinlock.h>
21618+#include <linux/sort.h>
21619 #include <asm/uaccess.h>
21620+#include <asm/pgtable.h>
21621
21622+/*
21623+ * The exception table needs to be sorted so that the binary
21624+ * search that we use to find entries in it works properly.
21625+ * This is used both for the kernel exception table and for
21626+ * the exception tables of modules that get loaded.
21627+ */
21628+static int cmp_ex(const void *a, const void *b)
21629+{
21630+ const struct exception_table_entry *x = a, *y = b;
21631+
21632+ /* avoid overflow */
21633+ if (x->insn > y->insn)
21634+ return 1;
21635+ if (x->insn < y->insn)
21636+ return -1;
21637+ return 0;
21638+}
21639+
21640+static void swap_ex(void *a, void *b, int size)
21641+{
21642+ struct exception_table_entry t, *x = a, *y = b;
21643+
21644+ t = *x;
21645+
21646+ pax_open_kernel();
21647+ *x = *y;
21648+ *y = t;
21649+ pax_close_kernel();
21650+}
21651+
21652+void sort_extable(struct exception_table_entry *start,
21653+ struct exception_table_entry *finish)
21654+{
21655+ sort(start, finish - start, sizeof(struct exception_table_entry),
21656+ cmp_ex, swap_ex);
21657+}
21658+
21659+#ifdef CONFIG_MODULES
21660+/*
21661+ * If the exception table is sorted, any referring to the module init
21662+ * will be at the beginning or the end.
21663+ */
21664+void trim_init_extable(struct module *m)
21665+{
21666+ /*trim the beginning*/
21667+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
21668+ m->extable++;
21669+ m->num_exentries--;
21670+ }
21671+ /*trim the end*/
21672+ while (m->num_exentries &&
21673+ within_module_init(m->extable[m->num_exentries-1].insn, m))
21674+ m->num_exentries--;
21675+}
21676+#endif /* CONFIG_MODULES */
21677
21678 int fixup_exception(struct pt_regs *regs)
21679 {
21680 const struct exception_table_entry *fixup;
21681
21682 #ifdef CONFIG_PNPBIOS
21683- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21684+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21685 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21686 extern u32 pnp_bios_is_utter_crap;
21687 pnp_bios_is_utter_crap = 1;
21688diff -urNp linux-2.6.32.48/arch/x86/mm/fault.c linux-2.6.32.48/arch/x86/mm/fault.c
21689--- linux-2.6.32.48/arch/x86/mm/fault.c 2011-11-08 19:02:43.000000000 -0500
21690+++ linux-2.6.32.48/arch/x86/mm/fault.c 2011-11-15 19:59:43.000000000 -0500
21691@@ -11,10 +11,19 @@
21692 #include <linux/kprobes.h> /* __kprobes, ... */
21693 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
21694 #include <linux/perf_event.h> /* perf_sw_event */
21695+#include <linux/unistd.h>
21696+#include <linux/compiler.h>
21697
21698 #include <asm/traps.h> /* dotraplinkage, ... */
21699 #include <asm/pgalloc.h> /* pgd_*(), ... */
21700 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21701+#include <asm/vsyscall.h>
21702+#include <asm/tlbflush.h>
21703+
21704+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21705+#include <asm/stacktrace.h>
21706+#include "../kernel/dumpstack.h"
21707+#endif
21708
21709 /*
21710 * Page fault error code bits:
21711@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
21712 int ret = 0;
21713
21714 /* kprobe_running() needs smp_processor_id() */
21715- if (kprobes_built_in() && !user_mode_vm(regs)) {
21716+ if (kprobes_built_in() && !user_mode(regs)) {
21717 preempt_disable();
21718 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21719 ret = 1;
21720@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
21721 return !instr_lo || (instr_lo>>1) == 1;
21722 case 0x00:
21723 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21724- if (probe_kernel_address(instr, opcode))
21725+ if (user_mode(regs)) {
21726+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21727+ return 0;
21728+ } else if (probe_kernel_address(instr, opcode))
21729 return 0;
21730
21731 *prefetch = (instr_lo == 0xF) &&
21732@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
21733 while (instr < max_instr) {
21734 unsigned char opcode;
21735
21736- if (probe_kernel_address(instr, opcode))
21737+ if (user_mode(regs)) {
21738+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21739+ break;
21740+ } else if (probe_kernel_address(instr, opcode))
21741 break;
21742
21743 instr++;
21744@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
21745 force_sig_info(si_signo, &info, tsk);
21746 }
21747
21748+#ifdef CONFIG_PAX_EMUTRAMP
21749+static int pax_handle_fetch_fault(struct pt_regs *regs);
21750+#endif
21751+
21752+#ifdef CONFIG_PAX_PAGEEXEC
21753+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21754+{
21755+ pgd_t *pgd;
21756+ pud_t *pud;
21757+ pmd_t *pmd;
21758+
21759+ pgd = pgd_offset(mm, address);
21760+ if (!pgd_present(*pgd))
21761+ return NULL;
21762+ pud = pud_offset(pgd, address);
21763+ if (!pud_present(*pud))
21764+ return NULL;
21765+ pmd = pmd_offset(pud, address);
21766+ if (!pmd_present(*pmd))
21767+ return NULL;
21768+ return pmd;
21769+}
21770+#endif
21771+
21772 DEFINE_SPINLOCK(pgd_lock);
21773 LIST_HEAD(pgd_list);
21774
21775@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
21776 address += PMD_SIZE) {
21777
21778 unsigned long flags;
21779+
21780+#ifdef CONFIG_PAX_PER_CPU_PGD
21781+ unsigned long cpu;
21782+#else
21783 struct page *page;
21784+#endif
21785
21786 spin_lock_irqsave(&pgd_lock, flags);
21787+
21788+#ifdef CONFIG_PAX_PER_CPU_PGD
21789+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21790+ pgd_t *pgd = get_cpu_pgd(cpu);
21791+#else
21792 list_for_each_entry(page, &pgd_list, lru) {
21793- if (!vmalloc_sync_one(page_address(page), address))
21794+ pgd_t *pgd = page_address(page);
21795+#endif
21796+
21797+ if (!vmalloc_sync_one(pgd, address))
21798 break;
21799 }
21800 spin_unlock_irqrestore(&pgd_lock, flags);
21801@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
21802 * an interrupt in the middle of a task switch..
21803 */
21804 pgd_paddr = read_cr3();
21805+
21806+#ifdef CONFIG_PAX_PER_CPU_PGD
21807+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21808+#endif
21809+
21810 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21811 if (!pmd_k)
21812 return -1;
21813@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21814
21815 const pgd_t *pgd_ref = pgd_offset_k(address);
21816 unsigned long flags;
21817+
21818+#ifdef CONFIG_PAX_PER_CPU_PGD
21819+ unsigned long cpu;
21820+#else
21821 struct page *page;
21822+#endif
21823
21824 if (pgd_none(*pgd_ref))
21825 continue;
21826
21827 spin_lock_irqsave(&pgd_lock, flags);
21828+
21829+#ifdef CONFIG_PAX_PER_CPU_PGD
21830+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21831+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21832+#else
21833 list_for_each_entry(page, &pgd_list, lru) {
21834 pgd_t *pgd;
21835 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21836+#endif
21837+
21838 if (pgd_none(*pgd))
21839 set_pgd(pgd, *pgd_ref);
21840 else
21841@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21842 * happen within a race in page table update. In the later
21843 * case just flush:
21844 */
21845+
21846+#ifdef CONFIG_PAX_PER_CPU_PGD
21847+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21848+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21849+#else
21850 pgd = pgd_offset(current->active_mm, address);
21851+#endif
21852+
21853 pgd_ref = pgd_offset_k(address);
21854 if (pgd_none(*pgd_ref))
21855 return -1;
21856@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21857 static int is_errata100(struct pt_regs *regs, unsigned long address)
21858 {
21859 #ifdef CONFIG_X86_64
21860- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21861+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21862 return 1;
21863 #endif
21864 return 0;
21865@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21866 }
21867
21868 static const char nx_warning[] = KERN_CRIT
21869-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21870+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21871
21872 static void
21873 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21874@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21875 if (!oops_may_print())
21876 return;
21877
21878- if (error_code & PF_INSTR) {
21879+ if (nx_enabled && (error_code & PF_INSTR)) {
21880 unsigned int level;
21881
21882 pte_t *pte = lookup_address(address, &level);
21883
21884 if (pte && pte_present(*pte) && !pte_exec(*pte))
21885- printk(nx_warning, current_uid());
21886+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21887 }
21888
21889+#ifdef CONFIG_PAX_KERNEXEC
21890+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21891+ if (current->signal->curr_ip)
21892+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21893+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21894+ else
21895+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21896+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21897+ }
21898+#endif
21899+
21900 printk(KERN_ALERT "BUG: unable to handle kernel ");
21901 if (address < PAGE_SIZE)
21902 printk(KERN_CONT "NULL pointer dereference");
21903@@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21904 unsigned long address, int si_code)
21905 {
21906 struct task_struct *tsk = current;
21907+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21908+ struct mm_struct *mm = tsk->mm;
21909+#endif
21910+
21911+#ifdef CONFIG_X86_64
21912+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21913+ if (regs->ip == (unsigned long)vgettimeofday) {
21914+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21915+ return;
21916+ } else if (regs->ip == (unsigned long)vtime) {
21917+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21918+ return;
21919+ } else if (regs->ip == (unsigned long)vgetcpu) {
21920+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21921+ return;
21922+ }
21923+ }
21924+#endif
21925+
21926+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21927+ if (mm && (error_code & PF_USER)) {
21928+ unsigned long ip = regs->ip;
21929+
21930+ if (v8086_mode(regs))
21931+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21932+
21933+ /*
21934+ * It's possible to have interrupts off here:
21935+ */
21936+ local_irq_enable();
21937+
21938+#ifdef CONFIG_PAX_PAGEEXEC
21939+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21940+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21941+
21942+#ifdef CONFIG_PAX_EMUTRAMP
21943+ switch (pax_handle_fetch_fault(regs)) {
21944+ case 2:
21945+ return;
21946+ }
21947+#endif
21948+
21949+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21950+ do_group_exit(SIGKILL);
21951+ }
21952+#endif
21953+
21954+#ifdef CONFIG_PAX_SEGMEXEC
21955+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21956+
21957+#ifdef CONFIG_PAX_EMUTRAMP
21958+ switch (pax_handle_fetch_fault(regs)) {
21959+ case 2:
21960+ return;
21961+ }
21962+#endif
21963+
21964+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21965+ do_group_exit(SIGKILL);
21966+ }
21967+#endif
21968+
21969+ }
21970+#endif
21971
21972 /* User mode accesses just cause a SIGSEGV */
21973 if (error_code & PF_USER) {
21974@@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21975 return 1;
21976 }
21977
21978+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21979+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21980+{
21981+ pte_t *pte;
21982+ pmd_t *pmd;
21983+ spinlock_t *ptl;
21984+ unsigned char pte_mask;
21985+
21986+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21987+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21988+ return 0;
21989+
21990+ /* PaX: it's our fault, let's handle it if we can */
21991+
21992+ /* PaX: take a look at read faults before acquiring any locks */
21993+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21994+ /* instruction fetch attempt from a protected page in user mode */
21995+ up_read(&mm->mmap_sem);
21996+
21997+#ifdef CONFIG_PAX_EMUTRAMP
21998+ switch (pax_handle_fetch_fault(regs)) {
21999+ case 2:
22000+ return 1;
22001+ }
22002+#endif
22003+
22004+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
22005+ do_group_exit(SIGKILL);
22006+ }
22007+
22008+ pmd = pax_get_pmd(mm, address);
22009+ if (unlikely(!pmd))
22010+ return 0;
22011+
22012+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
22013+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
22014+ pte_unmap_unlock(pte, ptl);
22015+ return 0;
22016+ }
22017+
22018+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
22019+ /* write attempt to a protected page in user mode */
22020+ pte_unmap_unlock(pte, ptl);
22021+ return 0;
22022+ }
22023+
22024+#ifdef CONFIG_SMP
22025+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
22026+#else
22027+ if (likely(address > get_limit(regs->cs)))
22028+#endif
22029+ {
22030+ set_pte(pte, pte_mkread(*pte));
22031+ __flush_tlb_one(address);
22032+ pte_unmap_unlock(pte, ptl);
22033+ up_read(&mm->mmap_sem);
22034+ return 1;
22035+ }
22036+
22037+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
22038+
22039+ /*
22040+ * PaX: fill DTLB with user rights and retry
22041+ */
22042+ __asm__ __volatile__ (
22043+ "orb %2,(%1)\n"
22044+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
22045+/*
22046+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
22047+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
22048+ * page fault when examined during a TLB load attempt. this is true not only
22049+ * for PTEs holding a non-present entry but also present entries that will
22050+ * raise a page fault (such as those set up by PaX, or the copy-on-write
22051+ * mechanism). in effect it means that we do *not* need to flush the TLBs
22052+ * for our target pages since their PTEs are simply not in the TLBs at all.
22053+
22054+ * the best thing in omitting it is that we gain around 15-20% speed in the
22055+ * fast path of the page fault handler and can get rid of tracing since we
22056+ * can no longer flush unintended entries.
22057+ */
22058+ "invlpg (%0)\n"
22059+#endif
22060+ __copyuser_seg"testb $0,(%0)\n"
22061+ "xorb %3,(%1)\n"
22062+ :
22063+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
22064+ : "memory", "cc");
22065+ pte_unmap_unlock(pte, ptl);
22066+ up_read(&mm->mmap_sem);
22067+ return 1;
22068+}
22069+#endif
22070+
22071 /*
22072 * Handle a spurious fault caused by a stale TLB entry.
22073 *
22074@@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
22075 static inline int
22076 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
22077 {
22078+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22079+ return 1;
22080+
22081 if (write) {
22082 /* write, present and write, not present: */
22083 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22084@@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
22085 {
22086 struct vm_area_struct *vma;
22087 struct task_struct *tsk;
22088- unsigned long address;
22089 struct mm_struct *mm;
22090 int write;
22091 int fault;
22092
22093+ /* Get the faulting address: */
22094+ unsigned long address = read_cr2();
22095+
22096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22097+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22098+ if (!search_exception_tables(regs->ip)) {
22099+ bad_area_nosemaphore(regs, error_code, address);
22100+ return;
22101+ }
22102+ if (address < PAX_USER_SHADOW_BASE) {
22103+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22104+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
22105+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22106+ } else
22107+ address -= PAX_USER_SHADOW_BASE;
22108+ }
22109+#endif
22110+
22111 tsk = current;
22112 mm = tsk->mm;
22113
22114- /* Get the faulting address: */
22115- address = read_cr2();
22116-
22117 /*
22118 * Detect and handle instructions that would cause a page fault for
22119 * both a tracked kernel page and a userspace page.
22120@@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
22121 * User-mode registers count as a user access even for any
22122 * potential system fault or CPU buglet:
22123 */
22124- if (user_mode_vm(regs)) {
22125+ if (user_mode(regs)) {
22126 local_irq_enable();
22127 error_code |= PF_USER;
22128 } else {
22129@@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
22130 might_sleep();
22131 }
22132
22133+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22134+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22135+ return;
22136+#endif
22137+
22138 vma = find_vma(mm, address);
22139 if (unlikely(!vma)) {
22140 bad_area(regs, error_code, address);
22141@@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
22142 bad_area(regs, error_code, address);
22143 return;
22144 }
22145- if (error_code & PF_USER) {
22146- /*
22147- * Accessing the stack below %sp is always a bug.
22148- * The large cushion allows instructions like enter
22149- * and pusha to work. ("enter $65535, $31" pushes
22150- * 32 pointers and then decrements %sp by 65535.)
22151- */
22152- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22153- bad_area(regs, error_code, address);
22154- return;
22155- }
22156+ /*
22157+ * Accessing the stack below %sp is always a bug.
22158+ * The large cushion allows instructions like enter
22159+ * and pusha to work. ("enter $65535, $31" pushes
22160+ * 32 pointers and then decrements %sp by 65535.)
22161+ */
22162+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22163+ bad_area(regs, error_code, address);
22164+ return;
22165 }
22166+
22167+#ifdef CONFIG_PAX_SEGMEXEC
22168+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22169+ bad_area(regs, error_code, address);
22170+ return;
22171+ }
22172+#endif
22173+
22174 if (unlikely(expand_stack(vma, address))) {
22175 bad_area(regs, error_code, address);
22176 return;
22177@@ -1146,3 +1418,199 @@ good_area:
22178
22179 up_read(&mm->mmap_sem);
22180 }
22181+
22182+#ifdef CONFIG_PAX_EMUTRAMP
22183+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22184+{
22185+ int err;
22186+
22187+ do { /* PaX: gcc trampoline emulation #1 */
22188+ unsigned char mov1, mov2;
22189+ unsigned short jmp;
22190+ unsigned int addr1, addr2;
22191+
22192+#ifdef CONFIG_X86_64
22193+ if ((regs->ip + 11) >> 32)
22194+ break;
22195+#endif
22196+
22197+ err = get_user(mov1, (unsigned char __user *)regs->ip);
22198+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22199+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22200+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22201+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22202+
22203+ if (err)
22204+ break;
22205+
22206+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22207+ regs->cx = addr1;
22208+ regs->ax = addr2;
22209+ regs->ip = addr2;
22210+ return 2;
22211+ }
22212+ } while (0);
22213+
22214+ do { /* PaX: gcc trampoline emulation #2 */
22215+ unsigned char mov, jmp;
22216+ unsigned int addr1, addr2;
22217+
22218+#ifdef CONFIG_X86_64
22219+ if ((regs->ip + 9) >> 32)
22220+ break;
22221+#endif
22222+
22223+ err = get_user(mov, (unsigned char __user *)regs->ip);
22224+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22225+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22226+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22227+
22228+ if (err)
22229+ break;
22230+
22231+ if (mov == 0xB9 && jmp == 0xE9) {
22232+ regs->cx = addr1;
22233+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22234+ return 2;
22235+ }
22236+ } while (0);
22237+
22238+ return 1; /* PaX in action */
22239+}
22240+
22241+#ifdef CONFIG_X86_64
22242+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22243+{
22244+ int err;
22245+
22246+ do { /* PaX: gcc trampoline emulation #1 */
22247+ unsigned short mov1, mov2, jmp1;
22248+ unsigned char jmp2;
22249+ unsigned int addr1;
22250+ unsigned long addr2;
22251+
22252+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22253+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22254+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22255+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22256+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22257+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22258+
22259+ if (err)
22260+ break;
22261+
22262+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22263+ regs->r11 = addr1;
22264+ regs->r10 = addr2;
22265+ regs->ip = addr1;
22266+ return 2;
22267+ }
22268+ } while (0);
22269+
22270+ do { /* PaX: gcc trampoline emulation #2 */
22271+ unsigned short mov1, mov2, jmp1;
22272+ unsigned char jmp2;
22273+ unsigned long addr1, addr2;
22274+
22275+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22276+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22277+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22278+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22279+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22280+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22281+
22282+ if (err)
22283+ break;
22284+
22285+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22286+ regs->r11 = addr1;
22287+ regs->r10 = addr2;
22288+ regs->ip = addr1;
22289+ return 2;
22290+ }
22291+ } while (0);
22292+
22293+ return 1; /* PaX in action */
22294+}
22295+#endif
22296+
22297+/*
22298+ * PaX: decide what to do with offenders (regs->ip = fault address)
22299+ *
22300+ * returns 1 when task should be killed
22301+ * 2 when gcc trampoline was detected
22302+ */
22303+static int pax_handle_fetch_fault(struct pt_regs *regs)
22304+{
22305+ if (v8086_mode(regs))
22306+ return 1;
22307+
22308+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22309+ return 1;
22310+
22311+#ifdef CONFIG_X86_32
22312+ return pax_handle_fetch_fault_32(regs);
22313+#else
22314+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22315+ return pax_handle_fetch_fault_32(regs);
22316+ else
22317+ return pax_handle_fetch_fault_64(regs);
22318+#endif
22319+}
22320+#endif
22321+
22322+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22323+void pax_report_insns(void *pc, void *sp)
22324+{
22325+ long i;
22326+
22327+ printk(KERN_ERR "PAX: bytes at PC: ");
22328+ for (i = 0; i < 20; i++) {
22329+ unsigned char c;
22330+ if (get_user(c, (unsigned char __force_user *)pc+i))
22331+ printk(KERN_CONT "?? ");
22332+ else
22333+ printk(KERN_CONT "%02x ", c);
22334+ }
22335+ printk("\n");
22336+
22337+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22338+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22339+ unsigned long c;
22340+ if (get_user(c, (unsigned long __force_user *)sp+i))
22341+#ifdef CONFIG_X86_32
22342+ printk(KERN_CONT "???????? ");
22343+#else
22344+ printk(KERN_CONT "???????????????? ");
22345+#endif
22346+ else
22347+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22348+ }
22349+ printk("\n");
22350+}
22351+#endif
22352+
22353+/**
22354+ * probe_kernel_write(): safely attempt to write to a location
22355+ * @dst: address to write to
22356+ * @src: pointer to the data that shall be written
22357+ * @size: size of the data chunk
22358+ *
22359+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22360+ * happens, handle that and return -EFAULT.
22361+ */
22362+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22363+{
22364+ long ret;
22365+ mm_segment_t old_fs = get_fs();
22366+
22367+ set_fs(KERNEL_DS);
22368+ pagefault_disable();
22369+ pax_open_kernel();
22370+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22371+ pax_close_kernel();
22372+ pagefault_enable();
22373+ set_fs(old_fs);
22374+
22375+ return ret ? -EFAULT : 0;
22376+}
22377diff -urNp linux-2.6.32.48/arch/x86/mm/gup.c linux-2.6.32.48/arch/x86/mm/gup.c
22378--- linux-2.6.32.48/arch/x86/mm/gup.c 2011-11-08 19:02:43.000000000 -0500
22379+++ linux-2.6.32.48/arch/x86/mm/gup.c 2011-11-15 19:59:43.000000000 -0500
22380@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
22381 addr = start;
22382 len = (unsigned long) nr_pages << PAGE_SHIFT;
22383 end = start + len;
22384- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22385+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22386 (void __user *)start, len)))
22387 return 0;
22388
22389diff -urNp linux-2.6.32.48/arch/x86/mm/highmem_32.c linux-2.6.32.48/arch/x86/mm/highmem_32.c
22390--- linux-2.6.32.48/arch/x86/mm/highmem_32.c 2011-11-08 19:02:43.000000000 -0500
22391+++ linux-2.6.32.48/arch/x86/mm/highmem_32.c 2011-11-15 19:59:43.000000000 -0500
22392@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
22393 idx = type + KM_TYPE_NR*smp_processor_id();
22394 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22395 BUG_ON(!pte_none(*(kmap_pte-idx)));
22396+
22397+ pax_open_kernel();
22398 set_pte(kmap_pte-idx, mk_pte(page, prot));
22399+ pax_close_kernel();
22400
22401 return (void *)vaddr;
22402 }
22403diff -urNp linux-2.6.32.48/arch/x86/mm/hugetlbpage.c linux-2.6.32.48/arch/x86/mm/hugetlbpage.c
22404--- linux-2.6.32.48/arch/x86/mm/hugetlbpage.c 2011-11-08 19:02:43.000000000 -0500
22405+++ linux-2.6.32.48/arch/x86/mm/hugetlbpage.c 2011-11-15 19:59:43.000000000 -0500
22406@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
22407 struct hstate *h = hstate_file(file);
22408 struct mm_struct *mm = current->mm;
22409 struct vm_area_struct *vma;
22410- unsigned long start_addr;
22411+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22412+
22413+#ifdef CONFIG_PAX_SEGMEXEC
22414+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22415+ pax_task_size = SEGMEXEC_TASK_SIZE;
22416+#endif
22417+
22418+ pax_task_size -= PAGE_SIZE;
22419
22420 if (len > mm->cached_hole_size) {
22421- start_addr = mm->free_area_cache;
22422+ start_addr = mm->free_area_cache;
22423 } else {
22424- start_addr = TASK_UNMAPPED_BASE;
22425- mm->cached_hole_size = 0;
22426+ start_addr = mm->mmap_base;
22427+ mm->cached_hole_size = 0;
22428 }
22429
22430 full_search:
22431@@ -281,26 +288,27 @@ full_search:
22432
22433 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22434 /* At this point: (!vma || addr < vma->vm_end). */
22435- if (TASK_SIZE - len < addr) {
22436+ if (pax_task_size - len < addr) {
22437 /*
22438 * Start a new search - just in case we missed
22439 * some holes.
22440 */
22441- if (start_addr != TASK_UNMAPPED_BASE) {
22442- start_addr = TASK_UNMAPPED_BASE;
22443+ if (start_addr != mm->mmap_base) {
22444+ start_addr = mm->mmap_base;
22445 mm->cached_hole_size = 0;
22446 goto full_search;
22447 }
22448 return -ENOMEM;
22449 }
22450- if (!vma || addr + len <= vma->vm_start) {
22451- mm->free_area_cache = addr + len;
22452- return addr;
22453- }
22454+ if (check_heap_stack_gap(vma, addr, len))
22455+ break;
22456 if (addr + mm->cached_hole_size < vma->vm_start)
22457 mm->cached_hole_size = vma->vm_start - addr;
22458 addr = ALIGN(vma->vm_end, huge_page_size(h));
22459 }
22460+
22461+ mm->free_area_cache = addr + len;
22462+ return addr;
22463 }
22464
22465 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22466@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
22467 {
22468 struct hstate *h = hstate_file(file);
22469 struct mm_struct *mm = current->mm;
22470- struct vm_area_struct *vma, *prev_vma;
22471- unsigned long base = mm->mmap_base, addr = addr0;
22472+ struct vm_area_struct *vma;
22473+ unsigned long base = mm->mmap_base, addr;
22474 unsigned long largest_hole = mm->cached_hole_size;
22475- int first_time = 1;
22476
22477 /* don't allow allocations above current base */
22478 if (mm->free_area_cache > base)
22479@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
22480 largest_hole = 0;
22481 mm->free_area_cache = base;
22482 }
22483-try_again:
22484+
22485 /* make sure it can fit in the remaining address space */
22486 if (mm->free_area_cache < len)
22487 goto fail;
22488
22489 /* either no address requested or cant fit in requested address hole */
22490- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22491+ addr = (mm->free_area_cache - len);
22492 do {
22493+ addr &= huge_page_mask(h);
22494+ vma = find_vma(mm, addr);
22495 /*
22496 * Lookup failure means no vma is above this address,
22497 * i.e. return with success:
22498- */
22499- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22500- return addr;
22501-
22502- /*
22503 * new region fits between prev_vma->vm_end and
22504 * vma->vm_start, use it:
22505 */
22506- if (addr + len <= vma->vm_start &&
22507- (!prev_vma || (addr >= prev_vma->vm_end))) {
22508+ if (check_heap_stack_gap(vma, addr, len)) {
22509 /* remember the address as a hint for next time */
22510- mm->cached_hole_size = largest_hole;
22511- return (mm->free_area_cache = addr);
22512- } else {
22513- /* pull free_area_cache down to the first hole */
22514- if (mm->free_area_cache == vma->vm_end) {
22515- mm->free_area_cache = vma->vm_start;
22516- mm->cached_hole_size = largest_hole;
22517- }
22518+ mm->cached_hole_size = largest_hole;
22519+ return (mm->free_area_cache = addr);
22520+ }
22521+ /* pull free_area_cache down to the first hole */
22522+ if (mm->free_area_cache == vma->vm_end) {
22523+ mm->free_area_cache = vma->vm_start;
22524+ mm->cached_hole_size = largest_hole;
22525 }
22526
22527 /* remember the largest hole we saw so far */
22528 if (addr + largest_hole < vma->vm_start)
22529- largest_hole = vma->vm_start - addr;
22530+ largest_hole = vma->vm_start - addr;
22531
22532 /* try just below the current vma->vm_start */
22533- addr = (vma->vm_start - len) & huge_page_mask(h);
22534- } while (len <= vma->vm_start);
22535+ addr = skip_heap_stack_gap(vma, len);
22536+ } while (!IS_ERR_VALUE(addr));
22537
22538 fail:
22539 /*
22540- * if hint left us with no space for the requested
22541- * mapping then try again:
22542- */
22543- if (first_time) {
22544- mm->free_area_cache = base;
22545- largest_hole = 0;
22546- first_time = 0;
22547- goto try_again;
22548- }
22549- /*
22550 * A failed mmap() very likely causes application failure,
22551 * so fall back to the bottom-up function here. This scenario
22552 * can happen with large stack limits and large mmap()
22553 * allocations.
22554 */
22555- mm->free_area_cache = TASK_UNMAPPED_BASE;
22556+
22557+#ifdef CONFIG_PAX_SEGMEXEC
22558+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22559+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22560+ else
22561+#endif
22562+
22563+ mm->mmap_base = TASK_UNMAPPED_BASE;
22564+
22565+#ifdef CONFIG_PAX_RANDMMAP
22566+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22567+ mm->mmap_base += mm->delta_mmap;
22568+#endif
22569+
22570+ mm->free_area_cache = mm->mmap_base;
22571 mm->cached_hole_size = ~0UL;
22572 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22573 len, pgoff, flags);
22574@@ -387,6 +393,7 @@ fail:
22575 /*
22576 * Restore the topdown base:
22577 */
22578+ mm->mmap_base = base;
22579 mm->free_area_cache = base;
22580 mm->cached_hole_size = ~0UL;
22581
22582@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
22583 struct hstate *h = hstate_file(file);
22584 struct mm_struct *mm = current->mm;
22585 struct vm_area_struct *vma;
22586+ unsigned long pax_task_size = TASK_SIZE;
22587
22588 if (len & ~huge_page_mask(h))
22589 return -EINVAL;
22590- if (len > TASK_SIZE)
22591+
22592+#ifdef CONFIG_PAX_SEGMEXEC
22593+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22594+ pax_task_size = SEGMEXEC_TASK_SIZE;
22595+#endif
22596+
22597+ pax_task_size -= PAGE_SIZE;
22598+
22599+ if (len > pax_task_size)
22600 return -ENOMEM;
22601
22602 if (flags & MAP_FIXED) {
22603@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
22604 if (addr) {
22605 addr = ALIGN(addr, huge_page_size(h));
22606 vma = find_vma(mm, addr);
22607- if (TASK_SIZE - len >= addr &&
22608- (!vma || addr + len <= vma->vm_start))
22609+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22610 return addr;
22611 }
22612 if (mm->get_unmapped_area == arch_get_unmapped_area)
22613diff -urNp linux-2.6.32.48/arch/x86/mm/init_32.c linux-2.6.32.48/arch/x86/mm/init_32.c
22614--- linux-2.6.32.48/arch/x86/mm/init_32.c 2011-11-08 19:02:43.000000000 -0500
22615+++ linux-2.6.32.48/arch/x86/mm/init_32.c 2011-11-15 19:59:43.000000000 -0500
22616@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
22617 }
22618
22619 /*
22620- * Creates a middle page table and puts a pointer to it in the
22621- * given global directory entry. This only returns the gd entry
22622- * in non-PAE compilation mode, since the middle layer is folded.
22623- */
22624-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22625-{
22626- pud_t *pud;
22627- pmd_t *pmd_table;
22628-
22629-#ifdef CONFIG_X86_PAE
22630- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22631- if (after_bootmem)
22632- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22633- else
22634- pmd_table = (pmd_t *)alloc_low_page();
22635- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22636- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22637- pud = pud_offset(pgd, 0);
22638- BUG_ON(pmd_table != pmd_offset(pud, 0));
22639-
22640- return pmd_table;
22641- }
22642-#endif
22643- pud = pud_offset(pgd, 0);
22644- pmd_table = pmd_offset(pud, 0);
22645-
22646- return pmd_table;
22647-}
22648-
22649-/*
22650 * Create a page table and place a pointer to it in a middle page
22651 * directory entry:
22652 */
22653@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
22654 page_table = (pte_t *)alloc_low_page();
22655
22656 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22657+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22658+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22659+#else
22660 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22661+#endif
22662 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22663 }
22664
22665 return pte_offset_kernel(pmd, 0);
22666 }
22667
22668+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22669+{
22670+ pud_t *pud;
22671+ pmd_t *pmd_table;
22672+
22673+ pud = pud_offset(pgd, 0);
22674+ pmd_table = pmd_offset(pud, 0);
22675+
22676+ return pmd_table;
22677+}
22678+
22679 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22680 {
22681 int pgd_idx = pgd_index(vaddr);
22682@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
22683 int pgd_idx, pmd_idx;
22684 unsigned long vaddr;
22685 pgd_t *pgd;
22686+ pud_t *pud;
22687 pmd_t *pmd;
22688 pte_t *pte = NULL;
22689
22690@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
22691 pgd = pgd_base + pgd_idx;
22692
22693 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22694- pmd = one_md_table_init(pgd);
22695- pmd = pmd + pmd_index(vaddr);
22696+ pud = pud_offset(pgd, vaddr);
22697+ pmd = pmd_offset(pud, vaddr);
22698+
22699+#ifdef CONFIG_X86_PAE
22700+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22701+#endif
22702+
22703 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22704 pmd++, pmd_idx++) {
22705 pte = page_table_kmap_check(one_page_table_init(pmd),
22706@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
22707 }
22708 }
22709
22710-static inline int is_kernel_text(unsigned long addr)
22711+static inline int is_kernel_text(unsigned long start, unsigned long end)
22712 {
22713- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
22714- return 1;
22715- return 0;
22716+ if ((start > ktla_ktva((unsigned long)_etext) ||
22717+ end <= ktla_ktva((unsigned long)_stext)) &&
22718+ (start > ktla_ktva((unsigned long)_einittext) ||
22719+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22720+
22721+#ifdef CONFIG_ACPI_SLEEP
22722+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22723+#endif
22724+
22725+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22726+ return 0;
22727+ return 1;
22728 }
22729
22730 /*
22731@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
22732 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
22733 unsigned long start_pfn, end_pfn;
22734 pgd_t *pgd_base = swapper_pg_dir;
22735- int pgd_idx, pmd_idx, pte_ofs;
22736+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22737 unsigned long pfn;
22738 pgd_t *pgd;
22739+ pud_t *pud;
22740 pmd_t *pmd;
22741 pte_t *pte;
22742 unsigned pages_2m, pages_4k;
22743@@ -278,8 +279,13 @@ repeat:
22744 pfn = start_pfn;
22745 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22746 pgd = pgd_base + pgd_idx;
22747- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22748- pmd = one_md_table_init(pgd);
22749+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22750+ pud = pud_offset(pgd, 0);
22751+ pmd = pmd_offset(pud, 0);
22752+
22753+#ifdef CONFIG_X86_PAE
22754+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22755+#endif
22756
22757 if (pfn >= end_pfn)
22758 continue;
22759@@ -291,14 +297,13 @@ repeat:
22760 #endif
22761 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22762 pmd++, pmd_idx++) {
22763- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22764+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22765
22766 /*
22767 * Map with big pages if possible, otherwise
22768 * create normal page tables:
22769 */
22770 if (use_pse) {
22771- unsigned int addr2;
22772 pgprot_t prot = PAGE_KERNEL_LARGE;
22773 /*
22774 * first pass will use the same initial
22775@@ -308,11 +313,7 @@ repeat:
22776 __pgprot(PTE_IDENT_ATTR |
22777 _PAGE_PSE);
22778
22779- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22780- PAGE_OFFSET + PAGE_SIZE-1;
22781-
22782- if (is_kernel_text(addr) ||
22783- is_kernel_text(addr2))
22784+ if (is_kernel_text(address, address + PMD_SIZE))
22785 prot = PAGE_KERNEL_LARGE_EXEC;
22786
22787 pages_2m++;
22788@@ -329,7 +330,7 @@ repeat:
22789 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22790 pte += pte_ofs;
22791 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22792- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22793+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22794 pgprot_t prot = PAGE_KERNEL;
22795 /*
22796 * first pass will use the same initial
22797@@ -337,7 +338,7 @@ repeat:
22798 */
22799 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22800
22801- if (is_kernel_text(addr))
22802+ if (is_kernel_text(address, address + PAGE_SIZE))
22803 prot = PAGE_KERNEL_EXEC;
22804
22805 pages_4k++;
22806@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
22807
22808 pud = pud_offset(pgd, va);
22809 pmd = pmd_offset(pud, va);
22810- if (!pmd_present(*pmd))
22811+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22812 break;
22813
22814 pte = pte_offset_kernel(pmd, va);
22815@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22816
22817 static void __init pagetable_init(void)
22818 {
22819- pgd_t *pgd_base = swapper_pg_dir;
22820-
22821- permanent_kmaps_init(pgd_base);
22822+ permanent_kmaps_init(swapper_pg_dir);
22823 }
22824
22825 #ifdef CONFIG_ACPI_SLEEP
22826@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22827 * ACPI suspend needs this for resume, because things like the intel-agp
22828 * driver might have split up a kernel 4MB mapping.
22829 */
22830-char swsusp_pg_dir[PAGE_SIZE]
22831+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22832 __attribute__ ((aligned(PAGE_SIZE)));
22833
22834 static inline void save_pg_dir(void)
22835 {
22836- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22837+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22838 }
22839 #else /* !CONFIG_ACPI_SLEEP */
22840 static inline void save_pg_dir(void)
22841@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22842 flush_tlb_all();
22843 }
22844
22845-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22846+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22847 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22848
22849 /* user-defined highmem size */
22850@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22851 * Initialize the boot-time allocator (with low memory only):
22852 */
22853 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22854- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22855+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22856 PAGE_SIZE);
22857 if (bootmap == -1L)
22858 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22859@@ -864,6 +863,12 @@ void __init mem_init(void)
22860
22861 pci_iommu_alloc();
22862
22863+#ifdef CONFIG_PAX_PER_CPU_PGD
22864+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22865+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22866+ KERNEL_PGD_PTRS);
22867+#endif
22868+
22869 #ifdef CONFIG_FLATMEM
22870 BUG_ON(!mem_map);
22871 #endif
22872@@ -881,7 +886,7 @@ void __init mem_init(void)
22873 set_highmem_pages_init();
22874
22875 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22876- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22877+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22878 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22879
22880 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22881@@ -923,10 +928,10 @@ void __init mem_init(void)
22882 ((unsigned long)&__init_end -
22883 (unsigned long)&__init_begin) >> 10,
22884
22885- (unsigned long)&_etext, (unsigned long)&_edata,
22886- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22887+ (unsigned long)&_sdata, (unsigned long)&_edata,
22888+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22889
22890- (unsigned long)&_text, (unsigned long)&_etext,
22891+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22892 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22893
22894 /*
22895@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22896 if (!kernel_set_to_readonly)
22897 return;
22898
22899+ start = ktla_ktva(start);
22900 pr_debug("Set kernel text: %lx - %lx for read write\n",
22901 start, start+size);
22902
22903@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22904 if (!kernel_set_to_readonly)
22905 return;
22906
22907+ start = ktla_ktva(start);
22908 pr_debug("Set kernel text: %lx - %lx for read only\n",
22909 start, start+size);
22910
22911@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22912 unsigned long start = PFN_ALIGN(_text);
22913 unsigned long size = PFN_ALIGN(_etext) - start;
22914
22915+ start = ktla_ktva(start);
22916 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22917 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22918 size >> 10);
22919diff -urNp linux-2.6.32.48/arch/x86/mm/init_64.c linux-2.6.32.48/arch/x86/mm/init_64.c
22920--- linux-2.6.32.48/arch/x86/mm/init_64.c 2011-11-08 19:02:43.000000000 -0500
22921+++ linux-2.6.32.48/arch/x86/mm/init_64.c 2011-11-15 19:59:43.000000000 -0500
22922@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22923 pmd = fill_pmd(pud, vaddr);
22924 pte = fill_pte(pmd, vaddr);
22925
22926+ pax_open_kernel();
22927 set_pte(pte, new_pte);
22928+ pax_close_kernel();
22929
22930 /*
22931 * It's enough to flush this one mapping.
22932@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22933 pgd = pgd_offset_k((unsigned long)__va(phys));
22934 if (pgd_none(*pgd)) {
22935 pud = (pud_t *) spp_getpage();
22936- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22937- _PAGE_USER));
22938+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22939 }
22940 pud = pud_offset(pgd, (unsigned long)__va(phys));
22941 if (pud_none(*pud)) {
22942 pmd = (pmd_t *) spp_getpage();
22943- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22944- _PAGE_USER));
22945+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22946 }
22947 pmd = pmd_offset(pud, phys);
22948 BUG_ON(!pmd_none(*pmd));
22949@@ -675,6 +675,12 @@ void __init mem_init(void)
22950
22951 pci_iommu_alloc();
22952
22953+#ifdef CONFIG_PAX_PER_CPU_PGD
22954+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22955+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22956+ KERNEL_PGD_PTRS);
22957+#endif
22958+
22959 /* clear_bss() already clear the empty_zero_page */
22960
22961 reservedpages = 0;
22962@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22963 static struct vm_area_struct gate_vma = {
22964 .vm_start = VSYSCALL_START,
22965 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22966- .vm_page_prot = PAGE_READONLY_EXEC,
22967- .vm_flags = VM_READ | VM_EXEC
22968+ .vm_page_prot = PAGE_READONLY,
22969+ .vm_flags = VM_READ
22970 };
22971
22972 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22973@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22974
22975 const char *arch_vma_name(struct vm_area_struct *vma)
22976 {
22977- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22978+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22979 return "[vdso]";
22980 if (vma == &gate_vma)
22981 return "[vsyscall]";
22982diff -urNp linux-2.6.32.48/arch/x86/mm/init.c linux-2.6.32.48/arch/x86/mm/init.c
22983--- linux-2.6.32.48/arch/x86/mm/init.c 2011-11-08 19:02:43.000000000 -0500
22984+++ linux-2.6.32.48/arch/x86/mm/init.c 2011-11-15 19:59:43.000000000 -0500
22985@@ -69,11 +69,7 @@ static void __init find_early_table_spac
22986 * cause a hotspot and fill up ZONE_DMA. The page tables
22987 * need roughly 0.5KB per GB.
22988 */
22989-#ifdef CONFIG_X86_32
22990- start = 0x7000;
22991-#else
22992- start = 0x8000;
22993-#endif
22994+ start = 0x100000;
22995 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22996 tables, PAGE_SIZE);
22997 if (e820_table_start == -1UL)
22998@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22999 #endif
23000
23001 set_nx();
23002- if (nx_enabled)
23003+ if (nx_enabled && cpu_has_nx)
23004 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
23005
23006 /* Enable PSE if available */
23007@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
23008 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
23009 * mmio resources as well as potential bios/acpi data regions.
23010 */
23011+
23012 int devmem_is_allowed(unsigned long pagenr)
23013 {
23014+#ifdef CONFIG_GRKERNSEC_KMEM
23015+ /* allow BDA */
23016+ if (!pagenr)
23017+ return 1;
23018+ /* allow EBDA */
23019+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23020+ return 1;
23021+ /* allow ISA/video mem */
23022+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23023+ return 1;
23024+ /* throw out everything else below 1MB */
23025+ if (pagenr <= 256)
23026+ return 0;
23027+#else
23028 if (pagenr <= 256)
23029 return 1;
23030+#endif
23031+
23032 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23033 return 0;
23034 if (!page_is_ram(pagenr))
23035@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
23036
23037 void free_initmem(void)
23038 {
23039+
23040+#ifdef CONFIG_PAX_KERNEXEC
23041+#ifdef CONFIG_X86_32
23042+ /* PaX: limit KERNEL_CS to actual size */
23043+ unsigned long addr, limit;
23044+ struct desc_struct d;
23045+ int cpu;
23046+
23047+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23048+ limit = (limit - 1UL) >> PAGE_SHIFT;
23049+
23050+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23051+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23052+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23053+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23054+ }
23055+
23056+ /* PaX: make KERNEL_CS read-only */
23057+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23058+ if (!paravirt_enabled())
23059+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23060+/*
23061+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23062+ pgd = pgd_offset_k(addr);
23063+ pud = pud_offset(pgd, addr);
23064+ pmd = pmd_offset(pud, addr);
23065+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23066+ }
23067+*/
23068+#ifdef CONFIG_X86_PAE
23069+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23070+/*
23071+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23072+ pgd = pgd_offset_k(addr);
23073+ pud = pud_offset(pgd, addr);
23074+ pmd = pmd_offset(pud, addr);
23075+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23076+ }
23077+*/
23078+#endif
23079+
23080+#ifdef CONFIG_MODULES
23081+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23082+#endif
23083+
23084+#else
23085+ pgd_t *pgd;
23086+ pud_t *pud;
23087+ pmd_t *pmd;
23088+ unsigned long addr, end;
23089+
23090+ /* PaX: make kernel code/rodata read-only, rest non-executable */
23091+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
23092+ pgd = pgd_offset_k(addr);
23093+ pud = pud_offset(pgd, addr);
23094+ pmd = pmd_offset(pud, addr);
23095+ if (!pmd_present(*pmd))
23096+ continue;
23097+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
23098+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23099+ else
23100+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23101+ }
23102+
23103+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
23104+ end = addr + KERNEL_IMAGE_SIZE;
23105+ for (; addr < end; addr += PMD_SIZE) {
23106+ pgd = pgd_offset_k(addr);
23107+ pud = pud_offset(pgd, addr);
23108+ pmd = pmd_offset(pud, addr);
23109+ if (!pmd_present(*pmd))
23110+ continue;
23111+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
23112+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23113+ }
23114+#endif
23115+
23116+ flush_tlb_all();
23117+#endif
23118+
23119 free_init_pages("unused kernel memory",
23120 (unsigned long)(&__init_begin),
23121 (unsigned long)(&__init_end));
23122diff -urNp linux-2.6.32.48/arch/x86/mm/iomap_32.c linux-2.6.32.48/arch/x86/mm/iomap_32.c
23123--- linux-2.6.32.48/arch/x86/mm/iomap_32.c 2011-11-08 19:02:43.000000000 -0500
23124+++ linux-2.6.32.48/arch/x86/mm/iomap_32.c 2011-11-15 19:59:43.000000000 -0500
23125@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
23126 debug_kmap_atomic(type);
23127 idx = type + KM_TYPE_NR * smp_processor_id();
23128 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23129+
23130+ pax_open_kernel();
23131 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23132+ pax_close_kernel();
23133+
23134 arch_flush_lazy_mmu_mode();
23135
23136 return (void *)vaddr;
23137diff -urNp linux-2.6.32.48/arch/x86/mm/ioremap.c linux-2.6.32.48/arch/x86/mm/ioremap.c
23138--- linux-2.6.32.48/arch/x86/mm/ioremap.c 2011-11-08 19:02:43.000000000 -0500
23139+++ linux-2.6.32.48/arch/x86/mm/ioremap.c 2011-11-15 19:59:43.000000000 -0500
23140@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
23141 * Second special case: Some BIOSen report the PC BIOS
23142 * area (640->1Mb) as ram even though it is not.
23143 */
23144- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
23145- pagenr < (BIOS_END >> PAGE_SHIFT))
23146+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
23147+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23148 return 0;
23149
23150 for (i = 0; i < e820.nr_map; i++) {
23151@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
23152 /*
23153 * Don't allow anybody to remap normal RAM that we're using..
23154 */
23155- for (pfn = phys_addr >> PAGE_SHIFT;
23156- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
23157- pfn++) {
23158-
23159+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
23160 int is_ram = page_is_ram(pfn);
23161
23162- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23163+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23164 return NULL;
23165 WARN_ON_ONCE(is_ram);
23166 }
23167@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
23168 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23169
23170 static __initdata int after_paging_init;
23171-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23172+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23173
23174 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23175 {
23176@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
23177 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23178
23179 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23180- memset(bm_pte, 0, sizeof(bm_pte));
23181- pmd_populate_kernel(&init_mm, pmd, bm_pte);
23182+ pmd_populate_user(&init_mm, pmd, bm_pte);
23183
23184 /*
23185 * The boot-ioremap range spans multiple pmds, for which
23186diff -urNp linux-2.6.32.48/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.48/arch/x86/mm/kmemcheck/kmemcheck.c
23187--- linux-2.6.32.48/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-08 19:02:43.000000000 -0500
23188+++ linux-2.6.32.48/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-15 19:59:43.000000000 -0500
23189@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
23190 * memory (e.g. tracked pages)? For now, we need this to avoid
23191 * invoking kmemcheck for PnP BIOS calls.
23192 */
23193- if (regs->flags & X86_VM_MASK)
23194+ if (v8086_mode(regs))
23195 return false;
23196- if (regs->cs != __KERNEL_CS)
23197+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23198 return false;
23199
23200 pte = kmemcheck_pte_lookup(address);
23201diff -urNp linux-2.6.32.48/arch/x86/mm/mmap.c linux-2.6.32.48/arch/x86/mm/mmap.c
23202--- linux-2.6.32.48/arch/x86/mm/mmap.c 2011-11-08 19:02:43.000000000 -0500
23203+++ linux-2.6.32.48/arch/x86/mm/mmap.c 2011-11-15 19:59:43.000000000 -0500
23204@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
23205 * Leave an at least ~128 MB hole with possible stack randomization.
23206 */
23207 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23208-#define MAX_GAP (TASK_SIZE/6*5)
23209+#define MAX_GAP (pax_task_size/6*5)
23210
23211 /*
23212 * True on X86_32 or when emulating IA32 on X86_64
23213@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
23214 return rnd << PAGE_SHIFT;
23215 }
23216
23217-static unsigned long mmap_base(void)
23218+static unsigned long mmap_base(struct mm_struct *mm)
23219 {
23220 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
23221+ unsigned long pax_task_size = TASK_SIZE;
23222+
23223+#ifdef CONFIG_PAX_SEGMEXEC
23224+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23225+ pax_task_size = SEGMEXEC_TASK_SIZE;
23226+#endif
23227
23228 if (gap < MIN_GAP)
23229 gap = MIN_GAP;
23230 else if (gap > MAX_GAP)
23231 gap = MAX_GAP;
23232
23233- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23234+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23235 }
23236
23237 /*
23238 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23239 * does, but not when emulating X86_32
23240 */
23241-static unsigned long mmap_legacy_base(void)
23242+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23243 {
23244- if (mmap_is_ia32())
23245+ if (mmap_is_ia32()) {
23246+
23247+#ifdef CONFIG_PAX_SEGMEXEC
23248+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23249+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23250+ else
23251+#endif
23252+
23253 return TASK_UNMAPPED_BASE;
23254- else
23255+ } else
23256 return TASK_UNMAPPED_BASE + mmap_rnd();
23257 }
23258
23259@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
23260 void arch_pick_mmap_layout(struct mm_struct *mm)
23261 {
23262 if (mmap_is_legacy()) {
23263- mm->mmap_base = mmap_legacy_base();
23264+ mm->mmap_base = mmap_legacy_base(mm);
23265+
23266+#ifdef CONFIG_PAX_RANDMMAP
23267+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23268+ mm->mmap_base += mm->delta_mmap;
23269+#endif
23270+
23271 mm->get_unmapped_area = arch_get_unmapped_area;
23272 mm->unmap_area = arch_unmap_area;
23273 } else {
23274- mm->mmap_base = mmap_base();
23275+ mm->mmap_base = mmap_base(mm);
23276+
23277+#ifdef CONFIG_PAX_RANDMMAP
23278+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23279+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23280+#endif
23281+
23282 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23283 mm->unmap_area = arch_unmap_area_topdown;
23284 }
23285diff -urNp linux-2.6.32.48/arch/x86/mm/mmio-mod.c linux-2.6.32.48/arch/x86/mm/mmio-mod.c
23286--- linux-2.6.32.48/arch/x86/mm/mmio-mod.c 2011-11-08 19:02:43.000000000 -0500
23287+++ linux-2.6.32.48/arch/x86/mm/mmio-mod.c 2011-11-15 19:59:43.000000000 -0500
23288@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
23289 break;
23290 default:
23291 {
23292- unsigned char *ip = (unsigned char *)instptr;
23293+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23294 my_trace->opcode = MMIO_UNKNOWN_OP;
23295 my_trace->width = 0;
23296 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23297@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
23298 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23299 void __iomem *addr)
23300 {
23301- static atomic_t next_id;
23302+ static atomic_unchecked_t next_id;
23303 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23304 /* These are page-unaligned. */
23305 struct mmiotrace_map map = {
23306@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
23307 .private = trace
23308 },
23309 .phys = offset,
23310- .id = atomic_inc_return(&next_id)
23311+ .id = atomic_inc_return_unchecked(&next_id)
23312 };
23313 map.map_id = trace->id;
23314
23315diff -urNp linux-2.6.32.48/arch/x86/mm/numa_32.c linux-2.6.32.48/arch/x86/mm/numa_32.c
23316--- linux-2.6.32.48/arch/x86/mm/numa_32.c 2011-11-08 19:02:43.000000000 -0500
23317+++ linux-2.6.32.48/arch/x86/mm/numa_32.c 2011-11-15 19:59:43.000000000 -0500
23318@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
23319 }
23320 #endif
23321
23322-extern unsigned long find_max_low_pfn(void);
23323 extern unsigned long highend_pfn, highstart_pfn;
23324
23325 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
23326diff -urNp linux-2.6.32.48/arch/x86/mm/pageattr.c linux-2.6.32.48/arch/x86/mm/pageattr.c
23327--- linux-2.6.32.48/arch/x86/mm/pageattr.c 2011-11-08 19:02:43.000000000 -0500
23328+++ linux-2.6.32.48/arch/x86/mm/pageattr.c 2011-11-15 19:59:43.000000000 -0500
23329@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
23330 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
23331 */
23332 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23333- pgprot_val(forbidden) |= _PAGE_NX;
23334+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23335
23336 /*
23337 * The kernel text needs to be executable for obvious reasons
23338 * Does not cover __inittext since that is gone later on. On
23339 * 64bit we do not enforce !NX on the low mapping
23340 */
23341- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23342- pgprot_val(forbidden) |= _PAGE_NX;
23343+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23344+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23345
23346+#ifdef CONFIG_DEBUG_RODATA
23347 /*
23348 * The .rodata section needs to be read-only. Using the pfn
23349 * catches all aliases.
23350@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
23351 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23352 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23353 pgprot_val(forbidden) |= _PAGE_RW;
23354+#endif
23355+
23356+#ifdef CONFIG_PAX_KERNEXEC
23357+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23358+ pgprot_val(forbidden) |= _PAGE_RW;
23359+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23360+ }
23361+#endif
23362
23363 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23364
23365@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23366 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23367 {
23368 /* change init_mm */
23369+ pax_open_kernel();
23370 set_pte_atomic(kpte, pte);
23371+
23372 #ifdef CONFIG_X86_32
23373 if (!SHARED_KERNEL_PMD) {
23374+
23375+#ifdef CONFIG_PAX_PER_CPU_PGD
23376+ unsigned long cpu;
23377+#else
23378 struct page *page;
23379+#endif
23380
23381+#ifdef CONFIG_PAX_PER_CPU_PGD
23382+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23383+ pgd_t *pgd = get_cpu_pgd(cpu);
23384+#else
23385 list_for_each_entry(page, &pgd_list, lru) {
23386- pgd_t *pgd;
23387+ pgd_t *pgd = (pgd_t *)page_address(page);
23388+#endif
23389+
23390 pud_t *pud;
23391 pmd_t *pmd;
23392
23393- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23394+ pgd += pgd_index(address);
23395 pud = pud_offset(pgd, address);
23396 pmd = pmd_offset(pud, address);
23397 set_pte_atomic((pte_t *)pmd, pte);
23398 }
23399 }
23400 #endif
23401+ pax_close_kernel();
23402 }
23403
23404 static int
23405diff -urNp linux-2.6.32.48/arch/x86/mm/pageattr-test.c linux-2.6.32.48/arch/x86/mm/pageattr-test.c
23406--- linux-2.6.32.48/arch/x86/mm/pageattr-test.c 2011-11-08 19:02:43.000000000 -0500
23407+++ linux-2.6.32.48/arch/x86/mm/pageattr-test.c 2011-11-15 19:59:43.000000000 -0500
23408@@ -36,7 +36,7 @@ enum {
23409
23410 static int pte_testbit(pte_t pte)
23411 {
23412- return pte_flags(pte) & _PAGE_UNUSED1;
23413+ return pte_flags(pte) & _PAGE_CPA_TEST;
23414 }
23415
23416 struct split_state {
23417diff -urNp linux-2.6.32.48/arch/x86/mm/pat.c linux-2.6.32.48/arch/x86/mm/pat.c
23418--- linux-2.6.32.48/arch/x86/mm/pat.c 2011-11-08 19:02:43.000000000 -0500
23419+++ linux-2.6.32.48/arch/x86/mm/pat.c 2011-11-15 19:59:43.000000000 -0500
23420@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
23421
23422 conflict:
23423 printk(KERN_INFO "%s:%d conflicting memory types "
23424- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
23425+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
23426 new->end, cattr_name(new->type), cattr_name(entry->type));
23427 return -EBUSY;
23428 }
23429@@ -559,7 +559,7 @@ unlock_ret:
23430
23431 if (err) {
23432 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23433- current->comm, current->pid, start, end);
23434+ current->comm, task_pid_nr(current), start, end);
23435 }
23436
23437 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
23438@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
23439 while (cursor < to) {
23440 if (!devmem_is_allowed(pfn)) {
23441 printk(KERN_INFO
23442- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23443- current->comm, from, to);
23444+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23445+ current->comm, from, to, cursor);
23446 return 0;
23447 }
23448 cursor += PAGE_SIZE;
23449@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
23450 printk(KERN_INFO
23451 "%s:%d ioremap_change_attr failed %s "
23452 "for %Lx-%Lx\n",
23453- current->comm, current->pid,
23454+ current->comm, task_pid_nr(current),
23455 cattr_name(flags),
23456 base, (unsigned long long)(base + size));
23457 return -EINVAL;
23458@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
23459 free_memtype(paddr, paddr + size);
23460 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23461 " for %Lx-%Lx, got %s\n",
23462- current->comm, current->pid,
23463+ current->comm, task_pid_nr(current),
23464 cattr_name(want_flags),
23465 (unsigned long long)paddr,
23466 (unsigned long long)(paddr + size),
23467diff -urNp linux-2.6.32.48/arch/x86/mm/pf_in.c linux-2.6.32.48/arch/x86/mm/pf_in.c
23468--- linux-2.6.32.48/arch/x86/mm/pf_in.c 2011-11-08 19:02:43.000000000 -0500
23469+++ linux-2.6.32.48/arch/x86/mm/pf_in.c 2011-11-15 19:59:43.000000000 -0500
23470@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
23471 int i;
23472 enum reason_type rv = OTHERS;
23473
23474- p = (unsigned char *)ins_addr;
23475+ p = (unsigned char *)ktla_ktva(ins_addr);
23476 p += skip_prefix(p, &prf);
23477 p += get_opcode(p, &opcode);
23478
23479@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
23480 struct prefix_bits prf;
23481 int i;
23482
23483- p = (unsigned char *)ins_addr;
23484+ p = (unsigned char *)ktla_ktva(ins_addr);
23485 p += skip_prefix(p, &prf);
23486 p += get_opcode(p, &opcode);
23487
23488@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
23489 struct prefix_bits prf;
23490 int i;
23491
23492- p = (unsigned char *)ins_addr;
23493+ p = (unsigned char *)ktla_ktva(ins_addr);
23494 p += skip_prefix(p, &prf);
23495 p += get_opcode(p, &opcode);
23496
23497@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
23498 int i;
23499 unsigned long rv;
23500
23501- p = (unsigned char *)ins_addr;
23502+ p = (unsigned char *)ktla_ktva(ins_addr);
23503 p += skip_prefix(p, &prf);
23504 p += get_opcode(p, &opcode);
23505 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23506@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
23507 int i;
23508 unsigned long rv;
23509
23510- p = (unsigned char *)ins_addr;
23511+ p = (unsigned char *)ktla_ktva(ins_addr);
23512 p += skip_prefix(p, &prf);
23513 p += get_opcode(p, &opcode);
23514 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23515diff -urNp linux-2.6.32.48/arch/x86/mm/pgtable_32.c linux-2.6.32.48/arch/x86/mm/pgtable_32.c
23516--- linux-2.6.32.48/arch/x86/mm/pgtable_32.c 2011-11-08 19:02:43.000000000 -0500
23517+++ linux-2.6.32.48/arch/x86/mm/pgtable_32.c 2011-11-15 19:59:43.000000000 -0500
23518@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
23519 return;
23520 }
23521 pte = pte_offset_kernel(pmd, vaddr);
23522+
23523+ pax_open_kernel();
23524 if (pte_val(pteval))
23525 set_pte_at(&init_mm, vaddr, pte, pteval);
23526 else
23527 pte_clear(&init_mm, vaddr, pte);
23528+ pax_close_kernel();
23529
23530 /*
23531 * It's enough to flush this one mapping.
23532diff -urNp linux-2.6.32.48/arch/x86/mm/pgtable.c linux-2.6.32.48/arch/x86/mm/pgtable.c
23533--- linux-2.6.32.48/arch/x86/mm/pgtable.c 2011-11-08 19:02:43.000000000 -0500
23534+++ linux-2.6.32.48/arch/x86/mm/pgtable.c 2011-11-15 19:59:43.000000000 -0500
23535@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
23536 list_del(&page->lru);
23537 }
23538
23539-#define UNSHARED_PTRS_PER_PGD \
23540- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23541+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23542+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23543
23544+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23545+{
23546+ while (count--)
23547+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23548+}
23549+#endif
23550+
23551+#ifdef CONFIG_PAX_PER_CPU_PGD
23552+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23553+{
23554+ while (count--)
23555+
23556+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23557+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23558+#else
23559+ *dst++ = *src++;
23560+#endif
23561+
23562+}
23563+#endif
23564+
23565+#ifdef CONFIG_X86_64
23566+#define pxd_t pud_t
23567+#define pyd_t pgd_t
23568+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23569+#define pxd_free(mm, pud) pud_free((mm), (pud))
23570+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23571+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23572+#define PYD_SIZE PGDIR_SIZE
23573+#else
23574+#define pxd_t pmd_t
23575+#define pyd_t pud_t
23576+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23577+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23578+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23579+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23580+#define PYD_SIZE PUD_SIZE
23581+#endif
23582+
23583+#ifdef CONFIG_PAX_PER_CPU_PGD
23584+static inline void pgd_ctor(pgd_t *pgd) {}
23585+static inline void pgd_dtor(pgd_t *pgd) {}
23586+#else
23587 static void pgd_ctor(pgd_t *pgd)
23588 {
23589 /* If the pgd points to a shared pagetable level (either the
23590@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
23591 pgd_list_del(pgd);
23592 spin_unlock_irqrestore(&pgd_lock, flags);
23593 }
23594+#endif
23595
23596 /*
23597 * List of all pgd's needed for non-PAE so it can invalidate entries
23598@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
23599 * -- wli
23600 */
23601
23602-#ifdef CONFIG_X86_PAE
23603+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23604 /*
23605 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23606 * updating the top-level pagetable entries to guarantee the
23607@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
23608 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23609 * and initialize the kernel pmds here.
23610 */
23611-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23612+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23613
23614 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23615 {
23616@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
23617 */
23618 flush_tlb_mm(mm);
23619 }
23620+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23621+#define PREALLOCATED_PXDS USER_PGD_PTRS
23622 #else /* !CONFIG_X86_PAE */
23623
23624 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23625-#define PREALLOCATED_PMDS 0
23626+#define PREALLOCATED_PXDS 0
23627
23628 #endif /* CONFIG_X86_PAE */
23629
23630-static void free_pmds(pmd_t *pmds[])
23631+static void free_pxds(pxd_t *pxds[])
23632 {
23633 int i;
23634
23635- for(i = 0; i < PREALLOCATED_PMDS; i++)
23636- if (pmds[i])
23637- free_page((unsigned long)pmds[i]);
23638+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23639+ if (pxds[i])
23640+ free_page((unsigned long)pxds[i]);
23641 }
23642
23643-static int preallocate_pmds(pmd_t *pmds[])
23644+static int preallocate_pxds(pxd_t *pxds[])
23645 {
23646 int i;
23647 bool failed = false;
23648
23649- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23650- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23651- if (pmd == NULL)
23652+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23653+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23654+ if (pxd == NULL)
23655 failed = true;
23656- pmds[i] = pmd;
23657+ pxds[i] = pxd;
23658 }
23659
23660 if (failed) {
23661- free_pmds(pmds);
23662+ free_pxds(pxds);
23663 return -ENOMEM;
23664 }
23665
23666@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
23667 * preallocate which never got a corresponding vma will need to be
23668 * freed manually.
23669 */
23670-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23671+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23672 {
23673 int i;
23674
23675- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23676+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23677 pgd_t pgd = pgdp[i];
23678
23679 if (pgd_val(pgd) != 0) {
23680- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23681+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23682
23683- pgdp[i] = native_make_pgd(0);
23684+ set_pgd(pgdp + i, native_make_pgd(0));
23685
23686- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23687- pmd_free(mm, pmd);
23688+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23689+ pxd_free(mm, pxd);
23690 }
23691 }
23692 }
23693
23694-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23695+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23696 {
23697- pud_t *pud;
23698+ pyd_t *pyd;
23699 unsigned long addr;
23700 int i;
23701
23702- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23703+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23704 return;
23705
23706- pud = pud_offset(pgd, 0);
23707+#ifdef CONFIG_X86_64
23708+ pyd = pyd_offset(mm, 0L);
23709+#else
23710+ pyd = pyd_offset(pgd, 0L);
23711+#endif
23712
23713- for (addr = i = 0; i < PREALLOCATED_PMDS;
23714- i++, pud++, addr += PUD_SIZE) {
23715- pmd_t *pmd = pmds[i];
23716+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23717+ i++, pyd++, addr += PYD_SIZE) {
23718+ pxd_t *pxd = pxds[i];
23719
23720 if (i >= KERNEL_PGD_BOUNDARY)
23721- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23722- sizeof(pmd_t) * PTRS_PER_PMD);
23723+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23724+ sizeof(pxd_t) * PTRS_PER_PMD);
23725
23726- pud_populate(mm, pud, pmd);
23727+ pyd_populate(mm, pyd, pxd);
23728 }
23729 }
23730
23731 pgd_t *pgd_alloc(struct mm_struct *mm)
23732 {
23733 pgd_t *pgd;
23734- pmd_t *pmds[PREALLOCATED_PMDS];
23735+ pxd_t *pxds[PREALLOCATED_PXDS];
23736+
23737 unsigned long flags;
23738
23739 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23740@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23741
23742 mm->pgd = pgd;
23743
23744- if (preallocate_pmds(pmds) != 0)
23745+ if (preallocate_pxds(pxds) != 0)
23746 goto out_free_pgd;
23747
23748 if (paravirt_pgd_alloc(mm) != 0)
23749- goto out_free_pmds;
23750+ goto out_free_pxds;
23751
23752 /*
23753 * Make sure that pre-populating the pmds is atomic with
23754@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23755 spin_lock_irqsave(&pgd_lock, flags);
23756
23757 pgd_ctor(pgd);
23758- pgd_prepopulate_pmd(mm, pgd, pmds);
23759+ pgd_prepopulate_pxd(mm, pgd, pxds);
23760
23761 spin_unlock_irqrestore(&pgd_lock, flags);
23762
23763 return pgd;
23764
23765-out_free_pmds:
23766- free_pmds(pmds);
23767+out_free_pxds:
23768+ free_pxds(pxds);
23769 out_free_pgd:
23770 free_page((unsigned long)pgd);
23771 out:
23772@@ -287,7 +338,7 @@ out:
23773
23774 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23775 {
23776- pgd_mop_up_pmds(mm, pgd);
23777+ pgd_mop_up_pxds(mm, pgd);
23778 pgd_dtor(pgd);
23779 paravirt_pgd_free(mm, pgd);
23780 free_page((unsigned long)pgd);
23781diff -urNp linux-2.6.32.48/arch/x86/mm/setup_nx.c linux-2.6.32.48/arch/x86/mm/setup_nx.c
23782--- linux-2.6.32.48/arch/x86/mm/setup_nx.c 2011-11-08 19:02:43.000000000 -0500
23783+++ linux-2.6.32.48/arch/x86/mm/setup_nx.c 2011-11-15 19:59:43.000000000 -0500
23784@@ -4,11 +4,10 @@
23785
23786 #include <asm/pgtable.h>
23787
23788+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23789 int nx_enabled;
23790
23791-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23792-static int disable_nx __cpuinitdata;
23793-
23794+#ifndef CONFIG_PAX_PAGEEXEC
23795 /*
23796 * noexec = on|off
23797 *
23798@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
23799 if (!str)
23800 return -EINVAL;
23801 if (!strncmp(str, "on", 2)) {
23802- __supported_pte_mask |= _PAGE_NX;
23803- disable_nx = 0;
23804+ nx_enabled = 1;
23805 } else if (!strncmp(str, "off", 3)) {
23806- disable_nx = 1;
23807- __supported_pte_mask &= ~_PAGE_NX;
23808+ nx_enabled = 0;
23809 }
23810 return 0;
23811 }
23812 early_param("noexec", noexec_setup);
23813 #endif
23814+#endif
23815
23816 #ifdef CONFIG_X86_PAE
23817 void __init set_nx(void)
23818 {
23819- unsigned int v[4], l, h;
23820+ if (!nx_enabled && cpu_has_nx) {
23821+ unsigned l, h;
23822
23823- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23824- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23825-
23826- if ((v[3] & (1 << 20)) && !disable_nx) {
23827- rdmsr(MSR_EFER, l, h);
23828- l |= EFER_NX;
23829- wrmsr(MSR_EFER, l, h);
23830- nx_enabled = 1;
23831- __supported_pte_mask |= _PAGE_NX;
23832- }
23833+ __supported_pte_mask &= ~_PAGE_NX;
23834+ rdmsr(MSR_EFER, l, h);
23835+ l &= ~EFER_NX;
23836+ wrmsr(MSR_EFER, l, h);
23837 }
23838 }
23839 #else
23840@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23841 unsigned long efer;
23842
23843 rdmsrl(MSR_EFER, efer);
23844- if (!(efer & EFER_NX) || disable_nx)
23845+ if (!(efer & EFER_NX) || !nx_enabled)
23846 __supported_pte_mask &= ~_PAGE_NX;
23847 }
23848 #endif
23849diff -urNp linux-2.6.32.48/arch/x86/mm/tlb.c linux-2.6.32.48/arch/x86/mm/tlb.c
23850--- linux-2.6.32.48/arch/x86/mm/tlb.c 2011-11-08 19:02:43.000000000 -0500
23851+++ linux-2.6.32.48/arch/x86/mm/tlb.c 2011-11-15 19:59:43.000000000 -0500
23852@@ -61,7 +61,11 @@ void leave_mm(int cpu)
23853 BUG();
23854 cpumask_clear_cpu(cpu,
23855 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23856+
23857+#ifndef CONFIG_PAX_PER_CPU_PGD
23858 load_cr3(swapper_pg_dir);
23859+#endif
23860+
23861 }
23862 EXPORT_SYMBOL_GPL(leave_mm);
23863
23864diff -urNp linux-2.6.32.48/arch/x86/oprofile/backtrace.c linux-2.6.32.48/arch/x86/oprofile/backtrace.c
23865--- linux-2.6.32.48/arch/x86/oprofile/backtrace.c 2011-11-08 19:02:43.000000000 -0500
23866+++ linux-2.6.32.48/arch/x86/oprofile/backtrace.c 2011-11-15 19:59:43.000000000 -0500
23867@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23868 struct frame_head bufhead[2];
23869
23870 /* Also check accessibility of one struct frame_head beyond */
23871- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23872+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23873 return NULL;
23874 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23875 return NULL;
23876@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23877 {
23878 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23879
23880- if (!user_mode_vm(regs)) {
23881+ if (!user_mode(regs)) {
23882 unsigned long stack = kernel_stack_pointer(regs);
23883 if (depth)
23884 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23885diff -urNp linux-2.6.32.48/arch/x86/oprofile/op_model_p4.c linux-2.6.32.48/arch/x86/oprofile/op_model_p4.c
23886--- linux-2.6.32.48/arch/x86/oprofile/op_model_p4.c 2011-11-08 19:02:43.000000000 -0500
23887+++ linux-2.6.32.48/arch/x86/oprofile/op_model_p4.c 2011-11-15 19:59:43.000000000 -0500
23888@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23889 #endif
23890 }
23891
23892-static int inline addr_increment(void)
23893+static inline int addr_increment(void)
23894 {
23895 #ifdef CONFIG_SMP
23896 return smp_num_siblings == 2 ? 2 : 1;
23897diff -urNp linux-2.6.32.48/arch/x86/pci/common.c linux-2.6.32.48/arch/x86/pci/common.c
23898--- linux-2.6.32.48/arch/x86/pci/common.c 2011-11-08 19:02:43.000000000 -0500
23899+++ linux-2.6.32.48/arch/x86/pci/common.c 2011-11-15 19:59:43.000000000 -0500
23900@@ -31,8 +31,8 @@ int noioapicreroute = 1;
23901 int pcibios_last_bus = -1;
23902 unsigned long pirq_table_addr;
23903 struct pci_bus *pci_root_bus;
23904-struct pci_raw_ops *raw_pci_ops;
23905-struct pci_raw_ops *raw_pci_ext_ops;
23906+const struct pci_raw_ops *raw_pci_ops;
23907+const struct pci_raw_ops *raw_pci_ext_ops;
23908
23909 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23910 int reg, int len, u32 *val)
23911diff -urNp linux-2.6.32.48/arch/x86/pci/direct.c linux-2.6.32.48/arch/x86/pci/direct.c
23912--- linux-2.6.32.48/arch/x86/pci/direct.c 2011-11-08 19:02:43.000000000 -0500
23913+++ linux-2.6.32.48/arch/x86/pci/direct.c 2011-11-15 19:59:43.000000000 -0500
23914@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23915
23916 #undef PCI_CONF1_ADDRESS
23917
23918-struct pci_raw_ops pci_direct_conf1 = {
23919+const struct pci_raw_ops pci_direct_conf1 = {
23920 .read = pci_conf1_read,
23921 .write = pci_conf1_write,
23922 };
23923@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23924
23925 #undef PCI_CONF2_ADDRESS
23926
23927-struct pci_raw_ops pci_direct_conf2 = {
23928+const struct pci_raw_ops pci_direct_conf2 = {
23929 .read = pci_conf2_read,
23930 .write = pci_conf2_write,
23931 };
23932@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23933 * This should be close to trivial, but it isn't, because there are buggy
23934 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23935 */
23936-static int __init pci_sanity_check(struct pci_raw_ops *o)
23937+static int __init pci_sanity_check(const struct pci_raw_ops *o)
23938 {
23939 u32 x = 0;
23940 int year, devfn;
23941diff -urNp linux-2.6.32.48/arch/x86/pci/mmconfig_32.c linux-2.6.32.48/arch/x86/pci/mmconfig_32.c
23942--- linux-2.6.32.48/arch/x86/pci/mmconfig_32.c 2011-11-08 19:02:43.000000000 -0500
23943+++ linux-2.6.32.48/arch/x86/pci/mmconfig_32.c 2011-11-15 19:59:43.000000000 -0500
23944@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23945 return 0;
23946 }
23947
23948-static struct pci_raw_ops pci_mmcfg = {
23949+static const struct pci_raw_ops pci_mmcfg = {
23950 .read = pci_mmcfg_read,
23951 .write = pci_mmcfg_write,
23952 };
23953diff -urNp linux-2.6.32.48/arch/x86/pci/mmconfig_64.c linux-2.6.32.48/arch/x86/pci/mmconfig_64.c
23954--- linux-2.6.32.48/arch/x86/pci/mmconfig_64.c 2011-11-08 19:02:43.000000000 -0500
23955+++ linux-2.6.32.48/arch/x86/pci/mmconfig_64.c 2011-11-15 19:59:43.000000000 -0500
23956@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23957 return 0;
23958 }
23959
23960-static struct pci_raw_ops pci_mmcfg = {
23961+static const struct pci_raw_ops pci_mmcfg = {
23962 .read = pci_mmcfg_read,
23963 .write = pci_mmcfg_write,
23964 };
23965diff -urNp linux-2.6.32.48/arch/x86/pci/numaq_32.c linux-2.6.32.48/arch/x86/pci/numaq_32.c
23966--- linux-2.6.32.48/arch/x86/pci/numaq_32.c 2011-11-08 19:02:43.000000000 -0500
23967+++ linux-2.6.32.48/arch/x86/pci/numaq_32.c 2011-11-15 19:59:43.000000000 -0500
23968@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23969
23970 #undef PCI_CONF1_MQ_ADDRESS
23971
23972-static struct pci_raw_ops pci_direct_conf1_mq = {
23973+static const struct pci_raw_ops pci_direct_conf1_mq = {
23974 .read = pci_conf1_mq_read,
23975 .write = pci_conf1_mq_write
23976 };
23977diff -urNp linux-2.6.32.48/arch/x86/pci/olpc.c linux-2.6.32.48/arch/x86/pci/olpc.c
23978--- linux-2.6.32.48/arch/x86/pci/olpc.c 2011-11-08 19:02:43.000000000 -0500
23979+++ linux-2.6.32.48/arch/x86/pci/olpc.c 2011-11-15 19:59:43.000000000 -0500
23980@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23981 return 0;
23982 }
23983
23984-static struct pci_raw_ops pci_olpc_conf = {
23985+static const struct pci_raw_ops pci_olpc_conf = {
23986 .read = pci_olpc_read,
23987 .write = pci_olpc_write,
23988 };
23989diff -urNp linux-2.6.32.48/arch/x86/pci/pcbios.c linux-2.6.32.48/arch/x86/pci/pcbios.c
23990--- linux-2.6.32.48/arch/x86/pci/pcbios.c 2011-11-08 19:02:43.000000000 -0500
23991+++ linux-2.6.32.48/arch/x86/pci/pcbios.c 2011-11-15 19:59:43.000000000 -0500
23992@@ -56,50 +56,93 @@ union bios32 {
23993 static struct {
23994 unsigned long address;
23995 unsigned short segment;
23996-} bios32_indirect = { 0, __KERNEL_CS };
23997+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23998
23999 /*
24000 * Returns the entry point for the given service, NULL on error
24001 */
24002
24003-static unsigned long bios32_service(unsigned long service)
24004+static unsigned long __devinit bios32_service(unsigned long service)
24005 {
24006 unsigned char return_code; /* %al */
24007 unsigned long address; /* %ebx */
24008 unsigned long length; /* %ecx */
24009 unsigned long entry; /* %edx */
24010 unsigned long flags;
24011+ struct desc_struct d, *gdt;
24012
24013 local_irq_save(flags);
24014- __asm__("lcall *(%%edi); cld"
24015+
24016+ gdt = get_cpu_gdt_table(smp_processor_id());
24017+
24018+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24019+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24020+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24021+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24022+
24023+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24024 : "=a" (return_code),
24025 "=b" (address),
24026 "=c" (length),
24027 "=d" (entry)
24028 : "0" (service),
24029 "1" (0),
24030- "D" (&bios32_indirect));
24031+ "D" (&bios32_indirect),
24032+ "r"(__PCIBIOS_DS)
24033+ : "memory");
24034+
24035+ pax_open_kernel();
24036+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24037+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24038+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24039+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24040+ pax_close_kernel();
24041+
24042 local_irq_restore(flags);
24043
24044 switch (return_code) {
24045- case 0:
24046- return address + entry;
24047- case 0x80: /* Not present */
24048- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24049- return 0;
24050- default: /* Shouldn't happen */
24051- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24052- service, return_code);
24053+ case 0: {
24054+ int cpu;
24055+ unsigned char flags;
24056+
24057+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24058+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24059+ printk(KERN_WARNING "bios32_service: not valid\n");
24060 return 0;
24061+ }
24062+ address = address + PAGE_OFFSET;
24063+ length += 16UL; /* some BIOSs underreport this... */
24064+ flags = 4;
24065+ if (length >= 64*1024*1024) {
24066+ length >>= PAGE_SHIFT;
24067+ flags |= 8;
24068+ }
24069+
24070+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24071+ gdt = get_cpu_gdt_table(cpu);
24072+ pack_descriptor(&d, address, length, 0x9b, flags);
24073+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24074+ pack_descriptor(&d, address, length, 0x93, flags);
24075+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24076+ }
24077+ return entry;
24078+ }
24079+ case 0x80: /* Not present */
24080+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24081+ return 0;
24082+ default: /* Shouldn't happen */
24083+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24084+ service, return_code);
24085+ return 0;
24086 }
24087 }
24088
24089 static struct {
24090 unsigned long address;
24091 unsigned short segment;
24092-} pci_indirect = { 0, __KERNEL_CS };
24093+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24094
24095-static int pci_bios_present;
24096+static int pci_bios_present __read_only;
24097
24098 static int __devinit check_pcibios(void)
24099 {
24100@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
24101 unsigned long flags, pcibios_entry;
24102
24103 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24104- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24105+ pci_indirect.address = pcibios_entry;
24106
24107 local_irq_save(flags);
24108- __asm__(
24109- "lcall *(%%edi); cld\n\t"
24110+ __asm__("movw %w6, %%ds\n\t"
24111+ "lcall *%%ss:(%%edi); cld\n\t"
24112+ "push %%ss\n\t"
24113+ "pop %%ds\n\t"
24114 "jc 1f\n\t"
24115 "xor %%ah, %%ah\n"
24116 "1:"
24117@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
24118 "=b" (ebx),
24119 "=c" (ecx)
24120 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24121- "D" (&pci_indirect)
24122+ "D" (&pci_indirect),
24123+ "r" (__PCIBIOS_DS)
24124 : "memory");
24125 local_irq_restore(flags);
24126
24127@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
24128
24129 switch (len) {
24130 case 1:
24131- __asm__("lcall *(%%esi); cld\n\t"
24132+ __asm__("movw %w6, %%ds\n\t"
24133+ "lcall *%%ss:(%%esi); cld\n\t"
24134+ "push %%ss\n\t"
24135+ "pop %%ds\n\t"
24136 "jc 1f\n\t"
24137 "xor %%ah, %%ah\n"
24138 "1:"
24139@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
24140 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24141 "b" (bx),
24142 "D" ((long)reg),
24143- "S" (&pci_indirect));
24144+ "S" (&pci_indirect),
24145+ "r" (__PCIBIOS_DS));
24146 /*
24147 * Zero-extend the result beyond 8 bits, do not trust the
24148 * BIOS having done it:
24149@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
24150 *value &= 0xff;
24151 break;
24152 case 2:
24153- __asm__("lcall *(%%esi); cld\n\t"
24154+ __asm__("movw %w6, %%ds\n\t"
24155+ "lcall *%%ss:(%%esi); cld\n\t"
24156+ "push %%ss\n\t"
24157+ "pop %%ds\n\t"
24158 "jc 1f\n\t"
24159 "xor %%ah, %%ah\n"
24160 "1:"
24161@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
24162 : "1" (PCIBIOS_READ_CONFIG_WORD),
24163 "b" (bx),
24164 "D" ((long)reg),
24165- "S" (&pci_indirect));
24166+ "S" (&pci_indirect),
24167+ "r" (__PCIBIOS_DS));
24168 /*
24169 * Zero-extend the result beyond 16 bits, do not trust the
24170 * BIOS having done it:
24171@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
24172 *value &= 0xffff;
24173 break;
24174 case 4:
24175- __asm__("lcall *(%%esi); cld\n\t"
24176+ __asm__("movw %w6, %%ds\n\t"
24177+ "lcall *%%ss:(%%esi); cld\n\t"
24178+ "push %%ss\n\t"
24179+ "pop %%ds\n\t"
24180 "jc 1f\n\t"
24181 "xor %%ah, %%ah\n"
24182 "1:"
24183@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
24184 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24185 "b" (bx),
24186 "D" ((long)reg),
24187- "S" (&pci_indirect));
24188+ "S" (&pci_indirect),
24189+ "r" (__PCIBIOS_DS));
24190 break;
24191 }
24192
24193@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
24194
24195 switch (len) {
24196 case 1:
24197- __asm__("lcall *(%%esi); cld\n\t"
24198+ __asm__("movw %w6, %%ds\n\t"
24199+ "lcall *%%ss:(%%esi); cld\n\t"
24200+ "push %%ss\n\t"
24201+ "pop %%ds\n\t"
24202 "jc 1f\n\t"
24203 "xor %%ah, %%ah\n"
24204 "1:"
24205@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
24206 "c" (value),
24207 "b" (bx),
24208 "D" ((long)reg),
24209- "S" (&pci_indirect));
24210+ "S" (&pci_indirect),
24211+ "r" (__PCIBIOS_DS));
24212 break;
24213 case 2:
24214- __asm__("lcall *(%%esi); cld\n\t"
24215+ __asm__("movw %w6, %%ds\n\t"
24216+ "lcall *%%ss:(%%esi); cld\n\t"
24217+ "push %%ss\n\t"
24218+ "pop %%ds\n\t"
24219 "jc 1f\n\t"
24220 "xor %%ah, %%ah\n"
24221 "1:"
24222@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
24223 "c" (value),
24224 "b" (bx),
24225 "D" ((long)reg),
24226- "S" (&pci_indirect));
24227+ "S" (&pci_indirect),
24228+ "r" (__PCIBIOS_DS));
24229 break;
24230 case 4:
24231- __asm__("lcall *(%%esi); cld\n\t"
24232+ __asm__("movw %w6, %%ds\n\t"
24233+ "lcall *%%ss:(%%esi); cld\n\t"
24234+ "push %%ss\n\t"
24235+ "pop %%ds\n\t"
24236 "jc 1f\n\t"
24237 "xor %%ah, %%ah\n"
24238 "1:"
24239@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
24240 "c" (value),
24241 "b" (bx),
24242 "D" ((long)reg),
24243- "S" (&pci_indirect));
24244+ "S" (&pci_indirect),
24245+ "r" (__PCIBIOS_DS));
24246 break;
24247 }
24248
24249@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
24250 * Function table for BIOS32 access
24251 */
24252
24253-static struct pci_raw_ops pci_bios_access = {
24254+static const struct pci_raw_ops pci_bios_access = {
24255 .read = pci_bios_read,
24256 .write = pci_bios_write
24257 };
24258@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
24259 * Try to find PCI BIOS.
24260 */
24261
24262-static struct pci_raw_ops * __devinit pci_find_bios(void)
24263+static const struct pci_raw_ops * __devinit pci_find_bios(void)
24264 {
24265 union bios32 *check;
24266 unsigned char sum;
24267@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
24268
24269 DBG("PCI: Fetching IRQ routing table... ");
24270 __asm__("push %%es\n\t"
24271+ "movw %w8, %%ds\n\t"
24272 "push %%ds\n\t"
24273 "pop %%es\n\t"
24274- "lcall *(%%esi); cld\n\t"
24275+ "lcall *%%ss:(%%esi); cld\n\t"
24276 "pop %%es\n\t"
24277+ "push %%ss\n\t"
24278+ "pop %%ds\n"
24279 "jc 1f\n\t"
24280 "xor %%ah, %%ah\n"
24281 "1:"
24282@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
24283 "1" (0),
24284 "D" ((long) &opt),
24285 "S" (&pci_indirect),
24286- "m" (opt)
24287+ "m" (opt),
24288+ "r" (__PCIBIOS_DS)
24289 : "memory");
24290 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24291 if (ret & 0xff00)
24292@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
24293 {
24294 int ret;
24295
24296- __asm__("lcall *(%%esi); cld\n\t"
24297+ __asm__("movw %w5, %%ds\n\t"
24298+ "lcall *%%ss:(%%esi); cld\n\t"
24299+ "push %%ss\n\t"
24300+ "pop %%ds\n"
24301 "jc 1f\n\t"
24302 "xor %%ah, %%ah\n"
24303 "1:"
24304@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
24305 : "0" (PCIBIOS_SET_PCI_HW_INT),
24306 "b" ((dev->bus->number << 8) | dev->devfn),
24307 "c" ((irq << 8) | (pin + 10)),
24308- "S" (&pci_indirect));
24309+ "S" (&pci_indirect),
24310+ "r" (__PCIBIOS_DS));
24311 return !(ret & 0xff00);
24312 }
24313 EXPORT_SYMBOL(pcibios_set_irq_routing);
24314diff -urNp linux-2.6.32.48/arch/x86/power/cpu.c linux-2.6.32.48/arch/x86/power/cpu.c
24315--- linux-2.6.32.48/arch/x86/power/cpu.c 2011-11-08 19:02:43.000000000 -0500
24316+++ linux-2.6.32.48/arch/x86/power/cpu.c 2011-11-15 19:59:43.000000000 -0500
24317@@ -129,7 +129,7 @@ static void do_fpu_end(void)
24318 static void fix_processor_context(void)
24319 {
24320 int cpu = smp_processor_id();
24321- struct tss_struct *t = &per_cpu(init_tss, cpu);
24322+ struct tss_struct *t = init_tss + cpu;
24323
24324 set_tss_desc(cpu, t); /*
24325 * This just modifies memory; should not be
24326@@ -139,7 +139,9 @@ static void fix_processor_context(void)
24327 */
24328
24329 #ifdef CONFIG_X86_64
24330+ pax_open_kernel();
24331 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24332+ pax_close_kernel();
24333
24334 syscall_init(); /* This sets MSR_*STAR and related */
24335 #endif
24336diff -urNp linux-2.6.32.48/arch/x86/vdso/Makefile linux-2.6.32.48/arch/x86/vdso/Makefile
24337--- linux-2.6.32.48/arch/x86/vdso/Makefile 2011-11-08 19:02:43.000000000 -0500
24338+++ linux-2.6.32.48/arch/x86/vdso/Makefile 2011-11-15 19:59:43.000000000 -0500
24339@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
24340 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
24341 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
24342
24343-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24344+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24345 GCOV_PROFILE := n
24346
24347 #
24348diff -urNp linux-2.6.32.48/arch/x86/vdso/vclock_gettime.c linux-2.6.32.48/arch/x86/vdso/vclock_gettime.c
24349--- linux-2.6.32.48/arch/x86/vdso/vclock_gettime.c 2011-11-08 19:02:43.000000000 -0500
24350+++ linux-2.6.32.48/arch/x86/vdso/vclock_gettime.c 2011-11-15 19:59:43.000000000 -0500
24351@@ -22,24 +22,48 @@
24352 #include <asm/hpet.h>
24353 #include <asm/unistd.h>
24354 #include <asm/io.h>
24355+#include <asm/fixmap.h>
24356 #include "vextern.h"
24357
24358 #define gtod vdso_vsyscall_gtod_data
24359
24360+notrace noinline long __vdso_fallback_time(long *t)
24361+{
24362+ long secs;
24363+ asm volatile("syscall"
24364+ : "=a" (secs)
24365+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
24366+ return secs;
24367+}
24368+
24369 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
24370 {
24371 long ret;
24372 asm("syscall" : "=a" (ret) :
24373- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
24374+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
24375 return ret;
24376 }
24377
24378+notrace static inline cycle_t __vdso_vread_hpet(void)
24379+{
24380+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
24381+}
24382+
24383+notrace static inline cycle_t __vdso_vread_tsc(void)
24384+{
24385+ cycle_t ret = (cycle_t)vget_cycles();
24386+
24387+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
24388+}
24389+
24390 notrace static inline long vgetns(void)
24391 {
24392 long v;
24393- cycles_t (*vread)(void);
24394- vread = gtod->clock.vread;
24395- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
24396+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
24397+ v = __vdso_vread_tsc();
24398+ else
24399+ v = __vdso_vread_hpet();
24400+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
24401 return (v * gtod->clock.mult) >> gtod->clock.shift;
24402 }
24403
24404@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
24405
24406 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
24407 {
24408- if (likely(gtod->sysctl_enabled))
24409+ if (likely(gtod->sysctl_enabled &&
24410+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
24411+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
24412 switch (clock) {
24413 case CLOCK_REALTIME:
24414 if (likely(gtod->clock.vread))
24415@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
24416 int clock_gettime(clockid_t, struct timespec *)
24417 __attribute__((weak, alias("__vdso_clock_gettime")));
24418
24419-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
24420+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
24421 {
24422 long ret;
24423- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
24424+ asm("syscall" : "=a" (ret) :
24425+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
24426+ return ret;
24427+}
24428+
24429+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
24430+{
24431+ if (likely(gtod->sysctl_enabled &&
24432+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
24433+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
24434+ {
24435 if (likely(tv != NULL)) {
24436 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
24437 offsetof(struct timespec, tv_nsec) ||
24438@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
24439 }
24440 return 0;
24441 }
24442- asm("syscall" : "=a" (ret) :
24443- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
24444- return ret;
24445+ return __vdso_fallback_gettimeofday(tv, tz);
24446 }
24447 int gettimeofday(struct timeval *, struct timezone *)
24448 __attribute__((weak, alias("__vdso_gettimeofday")));
24449diff -urNp linux-2.6.32.48/arch/x86/vdso/vdso32-setup.c linux-2.6.32.48/arch/x86/vdso/vdso32-setup.c
24450--- linux-2.6.32.48/arch/x86/vdso/vdso32-setup.c 2011-11-08 19:02:43.000000000 -0500
24451+++ linux-2.6.32.48/arch/x86/vdso/vdso32-setup.c 2011-11-15 19:59:43.000000000 -0500
24452@@ -25,6 +25,7 @@
24453 #include <asm/tlbflush.h>
24454 #include <asm/vdso.h>
24455 #include <asm/proto.h>
24456+#include <asm/mman.h>
24457
24458 enum {
24459 VDSO_DISABLED = 0,
24460@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
24461 void enable_sep_cpu(void)
24462 {
24463 int cpu = get_cpu();
24464- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24465+ struct tss_struct *tss = init_tss + cpu;
24466
24467 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24468 put_cpu();
24469@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24470 gate_vma.vm_start = FIXADDR_USER_START;
24471 gate_vma.vm_end = FIXADDR_USER_END;
24472 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24473- gate_vma.vm_page_prot = __P101;
24474+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24475 /*
24476 * Make sure the vDSO gets into every core dump.
24477 * Dumping its contents makes post-mortem fully interpretable later
24478@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
24479 if (compat)
24480 addr = VDSO_HIGH_BASE;
24481 else {
24482- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24483+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24484 if (IS_ERR_VALUE(addr)) {
24485 ret = addr;
24486 goto up_fail;
24487 }
24488 }
24489
24490- current->mm->context.vdso = (void *)addr;
24491+ current->mm->context.vdso = addr;
24492
24493 if (compat_uses_vma || !compat) {
24494 /*
24495@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
24496 }
24497
24498 current_thread_info()->sysenter_return =
24499- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24500+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24501
24502 up_fail:
24503 if (ret)
24504- current->mm->context.vdso = NULL;
24505+ current->mm->context.vdso = 0;
24506
24507 up_write(&mm->mmap_sem);
24508
24509@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
24510
24511 const char *arch_vma_name(struct vm_area_struct *vma)
24512 {
24513- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24514+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24515 return "[vdso]";
24516+
24517+#ifdef CONFIG_PAX_SEGMEXEC
24518+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24519+ return "[vdso]";
24520+#endif
24521+
24522 return NULL;
24523 }
24524
24525@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
24526 struct mm_struct *mm = tsk->mm;
24527
24528 /* Check to see if this task was created in compat vdso mode */
24529- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24530+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24531 return &gate_vma;
24532 return NULL;
24533 }
24534diff -urNp linux-2.6.32.48/arch/x86/vdso/vdso.lds.S linux-2.6.32.48/arch/x86/vdso/vdso.lds.S
24535--- linux-2.6.32.48/arch/x86/vdso/vdso.lds.S 2011-11-08 19:02:43.000000000 -0500
24536+++ linux-2.6.32.48/arch/x86/vdso/vdso.lds.S 2011-11-15 19:59:43.000000000 -0500
24537@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
24538 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
24539 #include "vextern.h"
24540 #undef VEXTERN
24541+
24542+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
24543+VEXTERN(fallback_gettimeofday)
24544+VEXTERN(fallback_time)
24545+VEXTERN(getcpu)
24546+#undef VEXTERN
24547diff -urNp linux-2.6.32.48/arch/x86/vdso/vextern.h linux-2.6.32.48/arch/x86/vdso/vextern.h
24548--- linux-2.6.32.48/arch/x86/vdso/vextern.h 2011-11-08 19:02:43.000000000 -0500
24549+++ linux-2.6.32.48/arch/x86/vdso/vextern.h 2011-11-15 19:59:43.000000000 -0500
24550@@ -11,6 +11,5 @@
24551 put into vextern.h and be referenced as a pointer with vdso prefix.
24552 The main kernel later fills in the values. */
24553
24554-VEXTERN(jiffies)
24555 VEXTERN(vgetcpu_mode)
24556 VEXTERN(vsyscall_gtod_data)
24557diff -urNp linux-2.6.32.48/arch/x86/vdso/vma.c linux-2.6.32.48/arch/x86/vdso/vma.c
24558--- linux-2.6.32.48/arch/x86/vdso/vma.c 2011-11-08 19:02:43.000000000 -0500
24559+++ linux-2.6.32.48/arch/x86/vdso/vma.c 2011-11-15 19:59:43.000000000 -0500
24560@@ -17,8 +17,6 @@
24561 #include "vextern.h" /* Just for VMAGIC. */
24562 #undef VEXTERN
24563
24564-unsigned int __read_mostly vdso_enabled = 1;
24565-
24566 extern char vdso_start[], vdso_end[];
24567 extern unsigned short vdso_sync_cpuid;
24568
24569@@ -27,10 +25,8 @@ static unsigned vdso_size;
24570
24571 static inline void *var_ref(void *p, char *name)
24572 {
24573- if (*(void **)p != (void *)VMAGIC) {
24574- printk("VDSO: variable %s broken\n", name);
24575- vdso_enabled = 0;
24576- }
24577+ if (*(void **)p != (void *)VMAGIC)
24578+ panic("VDSO: variable %s broken\n", name);
24579 return p;
24580 }
24581
24582@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
24583 if (!vbase)
24584 goto oom;
24585
24586- if (memcmp(vbase, "\177ELF", 4)) {
24587- printk("VDSO: I'm broken; not ELF\n");
24588- vdso_enabled = 0;
24589- }
24590+ if (memcmp(vbase, ELFMAG, SELFMAG))
24591+ panic("VDSO: I'm broken; not ELF\n");
24592
24593 #define VEXTERN(x) \
24594 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
24595 #include "vextern.h"
24596 #undef VEXTERN
24597+ vunmap(vbase);
24598 return 0;
24599
24600 oom:
24601- printk("Cannot allocate vdso\n");
24602- vdso_enabled = 0;
24603- return -ENOMEM;
24604+ panic("Cannot allocate vdso\n");
24605 }
24606 __initcall(init_vdso_vars);
24607
24608@@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
24609 unsigned long addr;
24610 int ret;
24611
24612- if (!vdso_enabled)
24613- return 0;
24614-
24615 down_write(&mm->mmap_sem);
24616 addr = vdso_addr(mm->start_stack, vdso_size);
24617 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24618@@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
24619 goto up_fail;
24620 }
24621
24622- current->mm->context.vdso = (void *)addr;
24623+ current->mm->context.vdso = addr;
24624
24625 ret = install_special_mapping(mm, addr, vdso_size,
24626 VM_READ|VM_EXEC|
24627@@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
24628 VM_ALWAYSDUMP,
24629 vdso_pages);
24630 if (ret) {
24631- current->mm->context.vdso = NULL;
24632+ current->mm->context.vdso = 0;
24633 goto up_fail;
24634 }
24635
24636@@ -132,10 +122,3 @@ up_fail:
24637 up_write(&mm->mmap_sem);
24638 return ret;
24639 }
24640-
24641-static __init int vdso_setup(char *s)
24642-{
24643- vdso_enabled = simple_strtoul(s, NULL, 0);
24644- return 0;
24645-}
24646-__setup("vdso=", vdso_setup);
24647diff -urNp linux-2.6.32.48/arch/x86/xen/enlighten.c linux-2.6.32.48/arch/x86/xen/enlighten.c
24648--- linux-2.6.32.48/arch/x86/xen/enlighten.c 2011-11-08 19:02:43.000000000 -0500
24649+++ linux-2.6.32.48/arch/x86/xen/enlighten.c 2011-11-15 19:59:43.000000000 -0500
24650@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24651
24652 struct shared_info xen_dummy_shared_info;
24653
24654-void *xen_initial_gdt;
24655-
24656 /*
24657 * Point at some empty memory to start with. We map the real shared_info
24658 * page as soon as fixmap is up and running.
24659@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
24660
24661 preempt_disable();
24662
24663- start = __get_cpu_var(idt_desc).address;
24664+ start = (unsigned long)__get_cpu_var(idt_desc).address;
24665 end = start + __get_cpu_var(idt_desc).size + 1;
24666
24667 xen_mc_flush();
24668@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
24669 #endif
24670 };
24671
24672-static void xen_reboot(int reason)
24673+static __noreturn void xen_reboot(int reason)
24674 {
24675 struct sched_shutdown r = { .reason = reason };
24676
24677@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
24678 BUG();
24679 }
24680
24681-static void xen_restart(char *msg)
24682+static __noreturn void xen_restart(char *msg)
24683 {
24684 xen_reboot(SHUTDOWN_reboot);
24685 }
24686
24687-static void xen_emergency_restart(void)
24688+static __noreturn void xen_emergency_restart(void)
24689 {
24690 xen_reboot(SHUTDOWN_reboot);
24691 }
24692
24693-static void xen_machine_halt(void)
24694+static __noreturn void xen_machine_halt(void)
24695 {
24696 xen_reboot(SHUTDOWN_poweroff);
24697 }
24698@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
24699 */
24700 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24701
24702-#ifdef CONFIG_X86_64
24703 /* Work out if we support NX */
24704- check_efer();
24705+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24706+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24707+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24708+ unsigned l, h;
24709+
24710+#ifdef CONFIG_X86_PAE
24711+ nx_enabled = 1;
24712+#endif
24713+ __supported_pte_mask |= _PAGE_NX;
24714+ rdmsr(MSR_EFER, l, h);
24715+ l |= EFER_NX;
24716+ wrmsr(MSR_EFER, l, h);
24717+ }
24718 #endif
24719
24720 xen_setup_features();
24721@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
24722
24723 machine_ops = xen_machine_ops;
24724
24725- /*
24726- * The only reliable way to retain the initial address of the
24727- * percpu gdt_page is to remember it here, so we can go and
24728- * mark it RW later, when the initial percpu area is freed.
24729- */
24730- xen_initial_gdt = &per_cpu(gdt_page, 0);
24731-
24732 xen_smp_init();
24733
24734 pgd = (pgd_t *)xen_start_info->pt_base;
24735diff -urNp linux-2.6.32.48/arch/x86/xen/mmu.c linux-2.6.32.48/arch/x86/xen/mmu.c
24736--- linux-2.6.32.48/arch/x86/xen/mmu.c 2011-11-08 19:02:43.000000000 -0500
24737+++ linux-2.6.32.48/arch/x86/xen/mmu.c 2011-11-15 19:59:43.000000000 -0500
24738@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
24739 convert_pfn_mfn(init_level4_pgt);
24740 convert_pfn_mfn(level3_ident_pgt);
24741 convert_pfn_mfn(level3_kernel_pgt);
24742+ convert_pfn_mfn(level3_vmalloc_pgt);
24743+ convert_pfn_mfn(level3_vmemmap_pgt);
24744
24745 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24746 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24747@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
24748 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24749 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24750 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24751+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
24752+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24753 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24754+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24755 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24756 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24757
24758@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_in
24759 pv_mmu_ops.set_pud = xen_set_pud;
24760 #if PAGETABLE_LEVELS == 4
24761 pv_mmu_ops.set_pgd = xen_set_pgd;
24762+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24763 #endif
24764
24765 /* This will work as long as patching hasn't happened yet
24766@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_o
24767 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24768 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24769 .set_pgd = xen_set_pgd_hyper,
24770+ .set_pgd_batched = xen_set_pgd_hyper,
24771
24772 .alloc_pud = xen_alloc_pmd_init,
24773 .release_pud = xen_release_pmd_init,
24774diff -urNp linux-2.6.32.48/arch/x86/xen/smp.c linux-2.6.32.48/arch/x86/xen/smp.c
24775--- linux-2.6.32.48/arch/x86/xen/smp.c 2011-11-08 19:02:43.000000000 -0500
24776+++ linux-2.6.32.48/arch/x86/xen/smp.c 2011-11-15 19:59:43.000000000 -0500
24777@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_
24778 {
24779 BUG_ON(smp_processor_id() != 0);
24780 native_smp_prepare_boot_cpu();
24781-
24782- /* We've switched to the "real" per-cpu gdt, so make sure the
24783- old memory can be recycled */
24784- make_lowmem_page_readwrite(xen_initial_gdt);
24785-
24786 xen_setup_vcpu_info_placement();
24787 }
24788
24789@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu,
24790 gdt = get_cpu_gdt_table(cpu);
24791
24792 ctxt->flags = VGCF_IN_KERNEL;
24793- ctxt->user_regs.ds = __USER_DS;
24794- ctxt->user_regs.es = __USER_DS;
24795+ ctxt->user_regs.ds = __KERNEL_DS;
24796+ ctxt->user_regs.es = __KERNEL_DS;
24797 ctxt->user_regs.ss = __KERNEL_DS;
24798 #ifdef CONFIG_X86_32
24799 ctxt->user_regs.fs = __KERNEL_PERCPU;
24800- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24801+ savesegment(gs, ctxt->user_regs.gs);
24802 #else
24803 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24804 #endif
24805@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned
24806 int rc;
24807
24808 per_cpu(current_task, cpu) = idle;
24809+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24810 #ifdef CONFIG_X86_32
24811 irq_ctx_init(cpu);
24812 #else
24813 clear_tsk_thread_flag(idle, TIF_FORK);
24814- per_cpu(kernel_stack, cpu) =
24815- (unsigned long)task_stack_page(idle) -
24816- KERNEL_STACK_OFFSET + THREAD_SIZE;
24817+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24818 #endif
24819 xen_setup_runstate_info(cpu);
24820 xen_setup_timer(cpu);
24821diff -urNp linux-2.6.32.48/arch/x86/xen/xen-asm_32.S linux-2.6.32.48/arch/x86/xen/xen-asm_32.S
24822--- linux-2.6.32.48/arch/x86/xen/xen-asm_32.S 2011-11-08 19:02:43.000000000 -0500
24823+++ linux-2.6.32.48/arch/x86/xen/xen-asm_32.S 2011-11-15 19:59:43.000000000 -0500
24824@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24825 ESP_OFFSET=4 # bytes pushed onto stack
24826
24827 /*
24828- * Store vcpu_info pointer for easy access. Do it this way to
24829- * avoid having to reload %fs
24830+ * Store vcpu_info pointer for easy access.
24831 */
24832 #ifdef CONFIG_SMP
24833- GET_THREAD_INFO(%eax)
24834- movl TI_cpu(%eax), %eax
24835- movl __per_cpu_offset(,%eax,4), %eax
24836- mov per_cpu__xen_vcpu(%eax), %eax
24837+ push %fs
24838+ mov $(__KERNEL_PERCPU), %eax
24839+ mov %eax, %fs
24840+ mov PER_CPU_VAR(xen_vcpu), %eax
24841+ pop %fs
24842 #else
24843 movl per_cpu__xen_vcpu, %eax
24844 #endif
24845diff -urNp linux-2.6.32.48/arch/x86/xen/xen-head.S linux-2.6.32.48/arch/x86/xen/xen-head.S
24846--- linux-2.6.32.48/arch/x86/xen/xen-head.S 2011-11-08 19:02:43.000000000 -0500
24847+++ linux-2.6.32.48/arch/x86/xen/xen-head.S 2011-11-15 19:59:43.000000000 -0500
24848@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24849 #ifdef CONFIG_X86_32
24850 mov %esi,xen_start_info
24851 mov $init_thread_union+THREAD_SIZE,%esp
24852+#ifdef CONFIG_SMP
24853+ movl $cpu_gdt_table,%edi
24854+ movl $__per_cpu_load,%eax
24855+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24856+ rorl $16,%eax
24857+ movb %al,__KERNEL_PERCPU + 4(%edi)
24858+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24859+ movl $__per_cpu_end - 1,%eax
24860+ subl $__per_cpu_start,%eax
24861+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24862+#endif
24863 #else
24864 mov %rsi,xen_start_info
24865 mov $init_thread_union+THREAD_SIZE,%rsp
24866diff -urNp linux-2.6.32.48/arch/x86/xen/xen-ops.h linux-2.6.32.48/arch/x86/xen/xen-ops.h
24867--- linux-2.6.32.48/arch/x86/xen/xen-ops.h 2011-11-08 19:02:43.000000000 -0500
24868+++ linux-2.6.32.48/arch/x86/xen/xen-ops.h 2011-11-15 19:59:43.000000000 -0500
24869@@ -10,8 +10,6 @@
24870 extern const char xen_hypervisor_callback[];
24871 extern const char xen_failsafe_callback[];
24872
24873-extern void *xen_initial_gdt;
24874-
24875 struct trap_info;
24876 void xen_copy_trap_info(struct trap_info *traps);
24877
24878diff -urNp linux-2.6.32.48/block/blk-integrity.c linux-2.6.32.48/block/blk-integrity.c
24879--- linux-2.6.32.48/block/blk-integrity.c 2011-11-08 19:02:43.000000000 -0500
24880+++ linux-2.6.32.48/block/blk-integrity.c 2011-11-15 19:59:43.000000000 -0500
24881@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24882 NULL,
24883 };
24884
24885-static struct sysfs_ops integrity_ops = {
24886+static const struct sysfs_ops integrity_ops = {
24887 .show = &integrity_attr_show,
24888 .store = &integrity_attr_store,
24889 };
24890diff -urNp linux-2.6.32.48/block/blk-iopoll.c linux-2.6.32.48/block/blk-iopoll.c
24891--- linux-2.6.32.48/block/blk-iopoll.c 2011-11-08 19:02:43.000000000 -0500
24892+++ linux-2.6.32.48/block/blk-iopoll.c 2011-11-15 19:59:43.000000000 -0500
24893@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24894 }
24895 EXPORT_SYMBOL(blk_iopoll_complete);
24896
24897-static void blk_iopoll_softirq(struct softirq_action *h)
24898+static void blk_iopoll_softirq(void)
24899 {
24900 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24901 int rearm = 0, budget = blk_iopoll_budget;
24902diff -urNp linux-2.6.32.48/block/blk-map.c linux-2.6.32.48/block/blk-map.c
24903--- linux-2.6.32.48/block/blk-map.c 2011-11-08 19:02:43.000000000 -0500
24904+++ linux-2.6.32.48/block/blk-map.c 2011-11-15 19:59:43.000000000 -0500
24905@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24906 * direct dma. else, set up kernel bounce buffers
24907 */
24908 uaddr = (unsigned long) ubuf;
24909- if (blk_rq_aligned(q, ubuf, len) && !map_data)
24910+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24911 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24912 else
24913 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24914@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24915 for (i = 0; i < iov_count; i++) {
24916 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24917
24918+ if (!iov[i].iov_len)
24919+ return -EINVAL;
24920+
24921 if (uaddr & queue_dma_alignment(q)) {
24922 unaligned = 1;
24923 break;
24924 }
24925- if (!iov[i].iov_len)
24926- return -EINVAL;
24927 }
24928
24929 if (unaligned || (q->dma_pad_mask & len) || map_data)
24930@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24931 if (!len || !kbuf)
24932 return -EINVAL;
24933
24934- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24935+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24936 if (do_copy)
24937 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24938 else
24939diff -urNp linux-2.6.32.48/block/blk-softirq.c linux-2.6.32.48/block/blk-softirq.c
24940--- linux-2.6.32.48/block/blk-softirq.c 2011-11-08 19:02:43.000000000 -0500
24941+++ linux-2.6.32.48/block/blk-softirq.c 2011-11-15 19:59:43.000000000 -0500
24942@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24943 * Softirq action handler - move entries to local list and loop over them
24944 * while passing them to the queue registered handler.
24945 */
24946-static void blk_done_softirq(struct softirq_action *h)
24947+static void blk_done_softirq(void)
24948 {
24949 struct list_head *cpu_list, local_list;
24950
24951diff -urNp linux-2.6.32.48/block/blk-sysfs.c linux-2.6.32.48/block/blk-sysfs.c
24952--- linux-2.6.32.48/block/blk-sysfs.c 2011-11-08 19:02:43.000000000 -0500
24953+++ linux-2.6.32.48/block/blk-sysfs.c 2011-11-15 19:59:43.000000000 -0500
24954@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24955 kmem_cache_free(blk_requestq_cachep, q);
24956 }
24957
24958-static struct sysfs_ops queue_sysfs_ops = {
24959+static const struct sysfs_ops queue_sysfs_ops = {
24960 .show = queue_attr_show,
24961 .store = queue_attr_store,
24962 };
24963diff -urNp linux-2.6.32.48/block/bsg.c linux-2.6.32.48/block/bsg.c
24964--- linux-2.6.32.48/block/bsg.c 2011-11-08 19:02:43.000000000 -0500
24965+++ linux-2.6.32.48/block/bsg.c 2011-11-15 19:59:43.000000000 -0500
24966@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24967 struct sg_io_v4 *hdr, struct bsg_device *bd,
24968 fmode_t has_write_perm)
24969 {
24970+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24971+ unsigned char *cmdptr;
24972+
24973 if (hdr->request_len > BLK_MAX_CDB) {
24974 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24975 if (!rq->cmd)
24976 return -ENOMEM;
24977- }
24978+ cmdptr = rq->cmd;
24979+ } else
24980+ cmdptr = tmpcmd;
24981
24982- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24983+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24984 hdr->request_len))
24985 return -EFAULT;
24986
24987+ if (cmdptr != rq->cmd)
24988+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24989+
24990 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24991 if (blk_verify_command(rq->cmd, has_write_perm))
24992 return -EPERM;
24993@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
24994 rq->next_rq = next_rq;
24995 next_rq->cmd_type = rq->cmd_type;
24996
24997- dxferp = (void*)(unsigned long)hdr->din_xferp;
24998+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
24999 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
25000 hdr->din_xfer_len, GFP_KERNEL);
25001 if (ret)
25002@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
25003
25004 if (hdr->dout_xfer_len) {
25005 dxfer_len = hdr->dout_xfer_len;
25006- dxferp = (void*)(unsigned long)hdr->dout_xferp;
25007+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
25008 } else if (hdr->din_xfer_len) {
25009 dxfer_len = hdr->din_xfer_len;
25010- dxferp = (void*)(unsigned long)hdr->din_xferp;
25011+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
25012 } else
25013 dxfer_len = 0;
25014
25015@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(stru
25016 int len = min_t(unsigned int, hdr->max_response_len,
25017 rq->sense_len);
25018
25019- ret = copy_to_user((void*)(unsigned long)hdr->response,
25020+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
25021 rq->sense, len);
25022 if (!ret)
25023 hdr->response_len = len;
25024diff -urNp linux-2.6.32.48/block/compat_ioctl.c linux-2.6.32.48/block/compat_ioctl.c
25025--- linux-2.6.32.48/block/compat_ioctl.c 2011-11-08 19:02:43.000000000 -0500
25026+++ linux-2.6.32.48/block/compat_ioctl.c 2011-11-15 19:59:43.000000000 -0500
25027@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
25028 err |= __get_user(f->spec1, &uf->spec1);
25029 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25030 err |= __get_user(name, &uf->name);
25031- f->name = compat_ptr(name);
25032+ f->name = (void __force_kernel *)compat_ptr(name);
25033 if (err) {
25034 err = -EFAULT;
25035 goto out;
25036diff -urNp linux-2.6.32.48/block/elevator.c linux-2.6.32.48/block/elevator.c
25037--- linux-2.6.32.48/block/elevator.c 2011-11-08 19:02:43.000000000 -0500
25038+++ linux-2.6.32.48/block/elevator.c 2011-11-15 19:59:43.000000000 -0500
25039@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
25040 return error;
25041 }
25042
25043-static struct sysfs_ops elv_sysfs_ops = {
25044+static const struct sysfs_ops elv_sysfs_ops = {
25045 .show = elv_attr_show,
25046 .store = elv_attr_store,
25047 };
25048diff -urNp linux-2.6.32.48/block/scsi_ioctl.c linux-2.6.32.48/block/scsi_ioctl.c
25049--- linux-2.6.32.48/block/scsi_ioctl.c 2011-11-08 19:02:43.000000000 -0500
25050+++ linux-2.6.32.48/block/scsi_ioctl.c 2011-11-15 19:59:43.000000000 -0500
25051@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
25052 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25053 struct sg_io_hdr *hdr, fmode_t mode)
25054 {
25055- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25056+ unsigned char tmpcmd[sizeof(rq->__cmd)];
25057+ unsigned char *cmdptr;
25058+
25059+ if (rq->cmd != rq->__cmd)
25060+ cmdptr = rq->cmd;
25061+ else
25062+ cmdptr = tmpcmd;
25063+
25064+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25065 return -EFAULT;
25066+
25067+ if (cmdptr != rq->cmd)
25068+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25069+
25070 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25071 return -EPERM;
25072
25073@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
25074 int err;
25075 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25076 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25077+ unsigned char tmpcmd[sizeof(rq->__cmd)];
25078+ unsigned char *cmdptr;
25079
25080 if (!sic)
25081 return -EINVAL;
25082@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
25083 */
25084 err = -EFAULT;
25085 rq->cmd_len = cmdlen;
25086- if (copy_from_user(rq->cmd, sic->data, cmdlen))
25087+
25088+ if (rq->cmd != rq->__cmd)
25089+ cmdptr = rq->cmd;
25090+ else
25091+ cmdptr = tmpcmd;
25092+
25093+ if (copy_from_user(cmdptr, sic->data, cmdlen))
25094 goto error;
25095
25096+ if (rq->cmd != cmdptr)
25097+ memcpy(rq->cmd, cmdptr, cmdlen);
25098+
25099 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25100 goto error;
25101
25102diff -urNp linux-2.6.32.48/crypto/cryptd.c linux-2.6.32.48/crypto/cryptd.c
25103--- linux-2.6.32.48/crypto/cryptd.c 2011-11-08 19:02:43.000000000 -0500
25104+++ linux-2.6.32.48/crypto/cryptd.c 2011-11-15 19:59:43.000000000 -0500
25105@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
25106
25107 struct cryptd_blkcipher_request_ctx {
25108 crypto_completion_t complete;
25109-};
25110+} __no_const;
25111
25112 struct cryptd_hash_ctx {
25113 struct crypto_shash *child;
25114diff -urNp linux-2.6.32.48/crypto/gf128mul.c linux-2.6.32.48/crypto/gf128mul.c
25115--- linux-2.6.32.48/crypto/gf128mul.c 2011-11-08 19:02:43.000000000 -0500
25116+++ linux-2.6.32.48/crypto/gf128mul.c 2011-11-15 19:59:43.000000000 -0500
25117@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
25118 for (i = 0; i < 7; ++i)
25119 gf128mul_x_lle(&p[i + 1], &p[i]);
25120
25121- memset(r, 0, sizeof(r));
25122+ memset(r, 0, sizeof(*r));
25123 for (i = 0;;) {
25124 u8 ch = ((u8 *)b)[15 - i];
25125
25126@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
25127 for (i = 0; i < 7; ++i)
25128 gf128mul_x_bbe(&p[i + 1], &p[i]);
25129
25130- memset(r, 0, sizeof(r));
25131+ memset(r, 0, sizeof(*r));
25132 for (i = 0;;) {
25133 u8 ch = ((u8 *)b)[i];
25134
25135diff -urNp linux-2.6.32.48/crypto/serpent.c linux-2.6.32.48/crypto/serpent.c
25136--- linux-2.6.32.48/crypto/serpent.c 2011-11-08 19:02:43.000000000 -0500
25137+++ linux-2.6.32.48/crypto/serpent.c 2011-11-15 19:59:43.000000000 -0500
25138@@ -21,6 +21,7 @@
25139 #include <asm/byteorder.h>
25140 #include <linux/crypto.h>
25141 #include <linux/types.h>
25142+#include <linux/sched.h>
25143
25144 /* Key is padded to the maximum of 256 bits before round key generation.
25145 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
25146@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
25147 u32 r0,r1,r2,r3,r4;
25148 int i;
25149
25150+ pax_track_stack();
25151+
25152 /* Copy key, add padding */
25153
25154 for (i = 0; i < keylen; ++i)
25155diff -urNp linux-2.6.32.48/Documentation/dontdiff linux-2.6.32.48/Documentation/dontdiff
25156--- linux-2.6.32.48/Documentation/dontdiff 2011-11-08 19:02:43.000000000 -0500
25157+++ linux-2.6.32.48/Documentation/dontdiff 2011-11-15 19:59:43.000000000 -0500
25158@@ -1,13 +1,16 @@
25159 *.a
25160 *.aux
25161 *.bin
25162+*.cis
25163 *.cpio
25164 *.csp
25165+*.dbg
25166 *.dsp
25167 *.dvi
25168 *.elf
25169 *.eps
25170 *.fw
25171+*.gcno
25172 *.gen.S
25173 *.gif
25174 *.grep
25175@@ -38,8 +41,10 @@
25176 *.tab.h
25177 *.tex
25178 *.ver
25179+*.vim
25180 *.xml
25181 *_MODULES
25182+*_reg_safe.h
25183 *_vga16.c
25184 *~
25185 *.9
25186@@ -49,11 +54,16 @@
25187 53c700_d.h
25188 CVS
25189 ChangeSet
25190+GPATH
25191+GRTAGS
25192+GSYMS
25193+GTAGS
25194 Image
25195 Kerntypes
25196 Module.markers
25197 Module.symvers
25198 PENDING
25199+PERF*
25200 SCCS
25201 System.map*
25202 TAGS
25203@@ -76,7 +86,11 @@ btfixupprep
25204 build
25205 bvmlinux
25206 bzImage*
25207+capability_names.h
25208+capflags.c
25209 classlist.h*
25210+clut_vga16.c
25211+common-cmds.h
25212 comp*.log
25213 compile.h*
25214 conf
25215@@ -97,19 +111,21 @@ elfconfig.h*
25216 fixdep
25217 fore200e_mkfirm
25218 fore200e_pca_fw.c*
25219+gate.lds
25220 gconf
25221 gen-devlist
25222 gen_crc32table
25223 gen_init_cpio
25224 genksyms
25225 *_gray256.c
25226+hash
25227 ihex2fw
25228 ikconfig.h*
25229 initramfs_data.cpio
25230+initramfs_data.cpio.bz2
25231 initramfs_data.cpio.gz
25232 initramfs_list
25233 kallsyms
25234-kconfig
25235 keywords.c
25236 ksym.c*
25237 ksym.h*
25238@@ -133,7 +149,9 @@ mkboot
25239 mkbugboot
25240 mkcpustr
25241 mkdep
25242+mkpiggy
25243 mkprep
25244+mkregtable
25245 mktables
25246 mktree
25247 modpost
25248@@ -149,6 +167,7 @@ patches*
25249 pca200e.bin
25250 pca200e_ecd.bin2
25251 piggy.gz
25252+piggy.S
25253 piggyback
25254 pnmtologo
25255 ppc_defs.h*
25256@@ -157,12 +176,15 @@ qconf
25257 raid6altivec*.c
25258 raid6int*.c
25259 raid6tables.c
25260+regdb.c
25261 relocs
25262+rlim_names.h
25263 series
25264 setup
25265 setup.bin
25266 setup.elf
25267 sImage
25268+slabinfo
25269 sm_tbl*
25270 split-include
25271 syscalltab.h
25272@@ -186,14 +208,20 @@ version.h*
25273 vmlinux
25274 vmlinux-*
25275 vmlinux.aout
25276+vmlinux.bin.all
25277+vmlinux.bin.bz2
25278 vmlinux.lds
25279+vmlinux.relocs
25280+voffset.h
25281 vsyscall.lds
25282 vsyscall_32.lds
25283 wanxlfw.inc
25284 uImage
25285 unifdef
25286+utsrelease.h
25287 wakeup.bin
25288 wakeup.elf
25289 wakeup.lds
25290 zImage*
25291 zconf.hash.c
25292+zoffset.h
25293diff -urNp linux-2.6.32.48/Documentation/kernel-parameters.txt linux-2.6.32.48/Documentation/kernel-parameters.txt
25294--- linux-2.6.32.48/Documentation/kernel-parameters.txt 2011-11-08 19:02:43.000000000 -0500
25295+++ linux-2.6.32.48/Documentation/kernel-parameters.txt 2011-11-15 19:59:43.000000000 -0500
25296@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
25297 the specified number of seconds. This is to be used if
25298 your oopses keep scrolling off the screen.
25299
25300+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
25301+ virtualization environments that don't cope well with the
25302+ expand down segment used by UDEREF on X86-32 or the frequent
25303+ page table updates on X86-64.
25304+
25305+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
25306+
25307 pcbit= [HW,ISDN]
25308
25309 pcd. [PARIDE]
25310diff -urNp linux-2.6.32.48/drivers/acpi/acpi_pad.c linux-2.6.32.48/drivers/acpi/acpi_pad.c
25311--- linux-2.6.32.48/drivers/acpi/acpi_pad.c 2011-11-08 19:02:43.000000000 -0500
25312+++ linux-2.6.32.48/drivers/acpi/acpi_pad.c 2011-11-15 19:59:43.000000000 -0500
25313@@ -30,7 +30,7 @@
25314 #include <acpi/acpi_bus.h>
25315 #include <acpi/acpi_drivers.h>
25316
25317-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
25318+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
25319 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
25320 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
25321 static DEFINE_MUTEX(isolated_cpus_lock);
25322diff -urNp linux-2.6.32.48/drivers/acpi/battery.c linux-2.6.32.48/drivers/acpi/battery.c
25323--- linux-2.6.32.48/drivers/acpi/battery.c 2011-11-08 19:02:43.000000000 -0500
25324+++ linux-2.6.32.48/drivers/acpi/battery.c 2011-11-15 19:59:43.000000000 -0500
25325@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
25326 }
25327
25328 static struct battery_file {
25329- struct file_operations ops;
25330+ const struct file_operations ops;
25331 mode_t mode;
25332 const char *name;
25333 } acpi_battery_file[] = {
25334diff -urNp linux-2.6.32.48/drivers/acpi/dock.c linux-2.6.32.48/drivers/acpi/dock.c
25335--- linux-2.6.32.48/drivers/acpi/dock.c 2011-11-08 19:02:43.000000000 -0500
25336+++ linux-2.6.32.48/drivers/acpi/dock.c 2011-11-15 19:59:43.000000000 -0500
25337@@ -77,7 +77,7 @@ struct dock_dependent_device {
25338 struct list_head list;
25339 struct list_head hotplug_list;
25340 acpi_handle handle;
25341- struct acpi_dock_ops *ops;
25342+ const struct acpi_dock_ops *ops;
25343 void *context;
25344 };
25345
25346@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
25347 * the dock driver after _DCK is executed.
25348 */
25349 int
25350-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
25351+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
25352 void *context)
25353 {
25354 struct dock_dependent_device *dd;
25355diff -urNp linux-2.6.32.48/drivers/acpi/osl.c linux-2.6.32.48/drivers/acpi/osl.c
25356--- linux-2.6.32.48/drivers/acpi/osl.c 2011-11-08 19:02:43.000000000 -0500
25357+++ linux-2.6.32.48/drivers/acpi/osl.c 2011-11-15 19:59:43.000000000 -0500
25358@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
25359 void __iomem *virt_addr;
25360
25361 virt_addr = ioremap(phys_addr, width);
25362+ if (!virt_addr)
25363+ return AE_NO_MEMORY;
25364 if (!value)
25365 value = &dummy;
25366
25367@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
25368 void __iomem *virt_addr;
25369
25370 virt_addr = ioremap(phys_addr, width);
25371+ if (!virt_addr)
25372+ return AE_NO_MEMORY;
25373
25374 switch (width) {
25375 case 8:
25376diff -urNp linux-2.6.32.48/drivers/acpi/power_meter.c linux-2.6.32.48/drivers/acpi/power_meter.c
25377--- linux-2.6.32.48/drivers/acpi/power_meter.c 2011-11-08 19:02:43.000000000 -0500
25378+++ linux-2.6.32.48/drivers/acpi/power_meter.c 2011-11-15 19:59:43.000000000 -0500
25379@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
25380 return res;
25381
25382 temp /= 1000;
25383- if (temp < 0)
25384- return -EINVAL;
25385
25386 mutex_lock(&resource->lock);
25387 resource->trip[attr->index - 7] = temp;
25388diff -urNp linux-2.6.32.48/drivers/acpi/proc.c linux-2.6.32.48/drivers/acpi/proc.c
25389--- linux-2.6.32.48/drivers/acpi/proc.c 2011-11-08 19:02:43.000000000 -0500
25390+++ linux-2.6.32.48/drivers/acpi/proc.c 2011-11-15 19:59:43.000000000 -0500
25391@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
25392 size_t count, loff_t * ppos)
25393 {
25394 struct list_head *node, *next;
25395- char strbuf[5];
25396- char str[5] = "";
25397- unsigned int len = count;
25398+ char strbuf[5] = {0};
25399 struct acpi_device *found_dev = NULL;
25400
25401- if (len > 4)
25402- len = 4;
25403- if (len < 0)
25404- return -EFAULT;
25405+ if (count > 4)
25406+ count = 4;
25407
25408- if (copy_from_user(strbuf, buffer, len))
25409+ if (copy_from_user(strbuf, buffer, count))
25410 return -EFAULT;
25411- strbuf[len] = '\0';
25412- sscanf(strbuf, "%s", str);
25413+ strbuf[count] = '\0';
25414
25415 mutex_lock(&acpi_device_lock);
25416 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25417@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
25418 if (!dev->wakeup.flags.valid)
25419 continue;
25420
25421- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25422+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25423 dev->wakeup.state.enabled =
25424 dev->wakeup.state.enabled ? 0 : 1;
25425 found_dev = dev;
25426diff -urNp linux-2.6.32.48/drivers/acpi/processor_core.c linux-2.6.32.48/drivers/acpi/processor_core.c
25427--- linux-2.6.32.48/drivers/acpi/processor_core.c 2011-11-08 19:02:43.000000000 -0500
25428+++ linux-2.6.32.48/drivers/acpi/processor_core.c 2011-11-15 19:59:43.000000000 -0500
25429@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
25430 return 0;
25431 }
25432
25433- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25434+ BUG_ON(pr->id >= nr_cpu_ids);
25435
25436 /*
25437 * Buggy BIOS check
25438diff -urNp linux-2.6.32.48/drivers/acpi/sbshc.c linux-2.6.32.48/drivers/acpi/sbshc.c
25439--- linux-2.6.32.48/drivers/acpi/sbshc.c 2011-11-08 19:02:43.000000000 -0500
25440+++ linux-2.6.32.48/drivers/acpi/sbshc.c 2011-11-15 19:59:43.000000000 -0500
25441@@ -17,7 +17,7 @@
25442
25443 #define PREFIX "ACPI: "
25444
25445-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
25446+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
25447 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
25448
25449 struct acpi_smb_hc {
25450diff -urNp linux-2.6.32.48/drivers/acpi/sleep.c linux-2.6.32.48/drivers/acpi/sleep.c
25451--- linux-2.6.32.48/drivers/acpi/sleep.c 2011-11-08 19:02:43.000000000 -0500
25452+++ linux-2.6.32.48/drivers/acpi/sleep.c 2011-11-15 19:59:43.000000000 -0500
25453@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
25454 }
25455 }
25456
25457-static struct platform_suspend_ops acpi_suspend_ops = {
25458+static const struct platform_suspend_ops acpi_suspend_ops = {
25459 .valid = acpi_suspend_state_valid,
25460 .begin = acpi_suspend_begin,
25461 .prepare_late = acpi_pm_prepare,
25462@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
25463 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
25464 * been requested.
25465 */
25466-static struct platform_suspend_ops acpi_suspend_ops_old = {
25467+static const struct platform_suspend_ops acpi_suspend_ops_old = {
25468 .valid = acpi_suspend_state_valid,
25469 .begin = acpi_suspend_begin_old,
25470 .prepare_late = acpi_pm_disable_gpes,
25471@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
25472 acpi_enable_all_runtime_gpes();
25473 }
25474
25475-static struct platform_hibernation_ops acpi_hibernation_ops = {
25476+static const struct platform_hibernation_ops acpi_hibernation_ops = {
25477 .begin = acpi_hibernation_begin,
25478 .end = acpi_pm_end,
25479 .pre_snapshot = acpi_hibernation_pre_snapshot,
25480@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
25481 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
25482 * been requested.
25483 */
25484-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
25485+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
25486 .begin = acpi_hibernation_begin_old,
25487 .end = acpi_pm_end,
25488 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
25489diff -urNp linux-2.6.32.48/drivers/acpi/video.c linux-2.6.32.48/drivers/acpi/video.c
25490--- linux-2.6.32.48/drivers/acpi/video.c 2011-11-08 19:02:43.000000000 -0500
25491+++ linux-2.6.32.48/drivers/acpi/video.c 2011-11-15 19:59:43.000000000 -0500
25492@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
25493 vd->brightness->levels[request_level]);
25494 }
25495
25496-static struct backlight_ops acpi_backlight_ops = {
25497+static const struct backlight_ops acpi_backlight_ops = {
25498 .get_brightness = acpi_video_get_brightness,
25499 .update_status = acpi_video_set_brightness,
25500 };
25501diff -urNp linux-2.6.32.48/drivers/ata/ahci.c linux-2.6.32.48/drivers/ata/ahci.c
25502--- linux-2.6.32.48/drivers/ata/ahci.c 2011-11-08 19:02:43.000000000 -0500
25503+++ linux-2.6.32.48/drivers/ata/ahci.c 2011-11-15 19:59:43.000000000 -0500
25504@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
25505 .sdev_attrs = ahci_sdev_attrs,
25506 };
25507
25508-static struct ata_port_operations ahci_ops = {
25509+static const struct ata_port_operations ahci_ops = {
25510 .inherits = &sata_pmp_port_ops,
25511
25512 .qc_defer = sata_pmp_qc_defer_cmd_switch,
25513@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
25514 .port_stop = ahci_port_stop,
25515 };
25516
25517-static struct ata_port_operations ahci_vt8251_ops = {
25518+static const struct ata_port_operations ahci_vt8251_ops = {
25519 .inherits = &ahci_ops,
25520 .hardreset = ahci_vt8251_hardreset,
25521 };
25522
25523-static struct ata_port_operations ahci_p5wdh_ops = {
25524+static const struct ata_port_operations ahci_p5wdh_ops = {
25525 .inherits = &ahci_ops,
25526 .hardreset = ahci_p5wdh_hardreset,
25527 };
25528
25529-static struct ata_port_operations ahci_sb600_ops = {
25530+static const struct ata_port_operations ahci_sb600_ops = {
25531 .inherits = &ahci_ops,
25532 .softreset = ahci_sb600_softreset,
25533 .pmp_softreset = ahci_sb600_softreset,
25534diff -urNp linux-2.6.32.48/drivers/ata/ata_generic.c linux-2.6.32.48/drivers/ata/ata_generic.c
25535--- linux-2.6.32.48/drivers/ata/ata_generic.c 2011-11-08 19:02:43.000000000 -0500
25536+++ linux-2.6.32.48/drivers/ata/ata_generic.c 2011-11-15 19:59:43.000000000 -0500
25537@@ -104,7 +104,7 @@ static struct scsi_host_template generic
25538 ATA_BMDMA_SHT(DRV_NAME),
25539 };
25540
25541-static struct ata_port_operations generic_port_ops = {
25542+static const struct ata_port_operations generic_port_ops = {
25543 .inherits = &ata_bmdma_port_ops,
25544 .cable_detect = ata_cable_unknown,
25545 .set_mode = generic_set_mode,
25546diff -urNp linux-2.6.32.48/drivers/ata/ata_piix.c linux-2.6.32.48/drivers/ata/ata_piix.c
25547--- linux-2.6.32.48/drivers/ata/ata_piix.c 2011-11-08 19:02:43.000000000 -0500
25548+++ linux-2.6.32.48/drivers/ata/ata_piix.c 2011-11-15 19:59:43.000000000 -0500
25549@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
25550 ATA_BMDMA_SHT(DRV_NAME),
25551 };
25552
25553-static struct ata_port_operations piix_pata_ops = {
25554+static const struct ata_port_operations piix_pata_ops = {
25555 .inherits = &ata_bmdma32_port_ops,
25556 .cable_detect = ata_cable_40wire,
25557 .set_piomode = piix_set_piomode,
25558@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
25559 .prereset = piix_pata_prereset,
25560 };
25561
25562-static struct ata_port_operations piix_vmw_ops = {
25563+static const struct ata_port_operations piix_vmw_ops = {
25564 .inherits = &piix_pata_ops,
25565 .bmdma_status = piix_vmw_bmdma_status,
25566 };
25567
25568-static struct ata_port_operations ich_pata_ops = {
25569+static const struct ata_port_operations ich_pata_ops = {
25570 .inherits = &piix_pata_ops,
25571 .cable_detect = ich_pata_cable_detect,
25572 .set_dmamode = ich_set_dmamode,
25573 };
25574
25575-static struct ata_port_operations piix_sata_ops = {
25576+static const struct ata_port_operations piix_sata_ops = {
25577 .inherits = &ata_bmdma_port_ops,
25578 };
25579
25580-static struct ata_port_operations piix_sidpr_sata_ops = {
25581+static const struct ata_port_operations piix_sidpr_sata_ops = {
25582 .inherits = &piix_sata_ops,
25583 .hardreset = sata_std_hardreset,
25584 .scr_read = piix_sidpr_scr_read,
25585diff -urNp linux-2.6.32.48/drivers/ata/libata-acpi.c linux-2.6.32.48/drivers/ata/libata-acpi.c
25586--- linux-2.6.32.48/drivers/ata/libata-acpi.c 2011-11-08 19:02:43.000000000 -0500
25587+++ linux-2.6.32.48/drivers/ata/libata-acpi.c 2011-11-15 19:59:43.000000000 -0500
25588@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
25589 ata_acpi_uevent(dev->link->ap, dev, event);
25590 }
25591
25592-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
25593+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
25594 .handler = ata_acpi_dev_notify_dock,
25595 .uevent = ata_acpi_dev_uevent,
25596 };
25597
25598-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
25599+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
25600 .handler = ata_acpi_ap_notify_dock,
25601 .uevent = ata_acpi_ap_uevent,
25602 };
25603diff -urNp linux-2.6.32.48/drivers/ata/libata-core.c linux-2.6.32.48/drivers/ata/libata-core.c
25604--- linux-2.6.32.48/drivers/ata/libata-core.c 2011-11-08 19:02:43.000000000 -0500
25605+++ linux-2.6.32.48/drivers/ata/libata-core.c 2011-11-15 19:59:43.000000000 -0500
25606@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
25607 struct ata_port *ap;
25608 unsigned int tag;
25609
25610- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25611+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25612 ap = qc->ap;
25613
25614 qc->flags = 0;
25615@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
25616 struct ata_port *ap;
25617 struct ata_link *link;
25618
25619- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25620+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25621 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25622 ap = qc->ap;
25623 link = qc->dev->link;
25624@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
25625 * LOCKING:
25626 * None.
25627 */
25628-static void ata_finalize_port_ops(struct ata_port_operations *ops)
25629+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
25630 {
25631 static DEFINE_SPINLOCK(lock);
25632 const struct ata_port_operations *cur;
25633@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
25634 return;
25635
25636 spin_lock(&lock);
25637+ pax_open_kernel();
25638
25639 for (cur = ops->inherits; cur; cur = cur->inherits) {
25640 void **inherit = (void **)cur;
25641@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
25642 if (IS_ERR(*pp))
25643 *pp = NULL;
25644
25645- ops->inherits = NULL;
25646+ *(struct ata_port_operations **)&ops->inherits = NULL;
25647
25648+ pax_close_kernel();
25649 spin_unlock(&lock);
25650 }
25651
25652@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
25653 */
25654 /* KILLME - the only user left is ipr */
25655 void ata_host_init(struct ata_host *host, struct device *dev,
25656- unsigned long flags, struct ata_port_operations *ops)
25657+ unsigned long flags, const struct ata_port_operations *ops)
25658 {
25659 spin_lock_init(&host->lock);
25660 host->dev = dev;
25661@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
25662 /* truly dummy */
25663 }
25664
25665-struct ata_port_operations ata_dummy_port_ops = {
25666+const struct ata_port_operations ata_dummy_port_ops = {
25667 .qc_prep = ata_noop_qc_prep,
25668 .qc_issue = ata_dummy_qc_issue,
25669 .error_handler = ata_dummy_error_handler,
25670diff -urNp linux-2.6.32.48/drivers/ata/libata-eh.c linux-2.6.32.48/drivers/ata/libata-eh.c
25671--- linux-2.6.32.48/drivers/ata/libata-eh.c 2011-11-08 19:02:43.000000000 -0500
25672+++ linux-2.6.32.48/drivers/ata/libata-eh.c 2011-11-15 19:59:43.000000000 -0500
25673@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
25674 {
25675 struct ata_link *link;
25676
25677+ pax_track_stack();
25678+
25679 ata_for_each_link(link, ap, HOST_FIRST)
25680 ata_eh_link_report(link);
25681 }
25682@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
25683 */
25684 void ata_std_error_handler(struct ata_port *ap)
25685 {
25686- struct ata_port_operations *ops = ap->ops;
25687+ const struct ata_port_operations *ops = ap->ops;
25688 ata_reset_fn_t hardreset = ops->hardreset;
25689
25690 /* ignore built-in hardreset if SCR access is not available */
25691diff -urNp linux-2.6.32.48/drivers/ata/libata-pmp.c linux-2.6.32.48/drivers/ata/libata-pmp.c
25692--- linux-2.6.32.48/drivers/ata/libata-pmp.c 2011-11-08 19:02:43.000000000 -0500
25693+++ linux-2.6.32.48/drivers/ata/libata-pmp.c 2011-11-15 19:59:43.000000000 -0500
25694@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
25695 */
25696 static int sata_pmp_eh_recover(struct ata_port *ap)
25697 {
25698- struct ata_port_operations *ops = ap->ops;
25699+ const struct ata_port_operations *ops = ap->ops;
25700 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
25701 struct ata_link *pmp_link = &ap->link;
25702 struct ata_device *pmp_dev = pmp_link->device;
25703diff -urNp linux-2.6.32.48/drivers/ata/pata_acpi.c linux-2.6.32.48/drivers/ata/pata_acpi.c
25704--- linux-2.6.32.48/drivers/ata/pata_acpi.c 2011-11-08 19:02:43.000000000 -0500
25705+++ linux-2.6.32.48/drivers/ata/pata_acpi.c 2011-11-15 19:59:43.000000000 -0500
25706@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
25707 ATA_BMDMA_SHT(DRV_NAME),
25708 };
25709
25710-static struct ata_port_operations pacpi_ops = {
25711+static const struct ata_port_operations pacpi_ops = {
25712 .inherits = &ata_bmdma_port_ops,
25713 .qc_issue = pacpi_qc_issue,
25714 .cable_detect = pacpi_cable_detect,
25715diff -urNp linux-2.6.32.48/drivers/ata/pata_ali.c linux-2.6.32.48/drivers/ata/pata_ali.c
25716--- linux-2.6.32.48/drivers/ata/pata_ali.c 2011-11-08 19:02:43.000000000 -0500
25717+++ linux-2.6.32.48/drivers/ata/pata_ali.c 2011-11-15 19:59:43.000000000 -0500
25718@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
25719 * Port operations for PIO only ALi
25720 */
25721
25722-static struct ata_port_operations ali_early_port_ops = {
25723+static const struct ata_port_operations ali_early_port_ops = {
25724 .inherits = &ata_sff_port_ops,
25725 .cable_detect = ata_cable_40wire,
25726 .set_piomode = ali_set_piomode,
25727@@ -382,7 +382,7 @@ static const struct ata_port_operations
25728 * Port operations for DMA capable ALi without cable
25729 * detect
25730 */
25731-static struct ata_port_operations ali_20_port_ops = {
25732+static const struct ata_port_operations ali_20_port_ops = {
25733 .inherits = &ali_dma_base_ops,
25734 .cable_detect = ata_cable_40wire,
25735 .mode_filter = ali_20_filter,
25736@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
25737 /*
25738 * Port operations for DMA capable ALi with cable detect
25739 */
25740-static struct ata_port_operations ali_c2_port_ops = {
25741+static const struct ata_port_operations ali_c2_port_ops = {
25742 .inherits = &ali_dma_base_ops,
25743 .check_atapi_dma = ali_check_atapi_dma,
25744 .cable_detect = ali_c2_cable_detect,
25745@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
25746 /*
25747 * Port operations for DMA capable ALi with cable detect
25748 */
25749-static struct ata_port_operations ali_c4_port_ops = {
25750+static const struct ata_port_operations ali_c4_port_ops = {
25751 .inherits = &ali_dma_base_ops,
25752 .check_atapi_dma = ali_check_atapi_dma,
25753 .cable_detect = ali_c2_cable_detect,
25754@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
25755 /*
25756 * Port operations for DMA capable ALi with cable detect and LBA48
25757 */
25758-static struct ata_port_operations ali_c5_port_ops = {
25759+static const struct ata_port_operations ali_c5_port_ops = {
25760 .inherits = &ali_dma_base_ops,
25761 .check_atapi_dma = ali_check_atapi_dma,
25762 .dev_config = ali_warn_atapi_dma,
25763diff -urNp linux-2.6.32.48/drivers/ata/pata_amd.c linux-2.6.32.48/drivers/ata/pata_amd.c
25764--- linux-2.6.32.48/drivers/ata/pata_amd.c 2011-11-08 19:02:43.000000000 -0500
25765+++ linux-2.6.32.48/drivers/ata/pata_amd.c 2011-11-15 19:59:43.000000000 -0500
25766@@ -397,28 +397,28 @@ static const struct ata_port_operations
25767 .prereset = amd_pre_reset,
25768 };
25769
25770-static struct ata_port_operations amd33_port_ops = {
25771+static const struct ata_port_operations amd33_port_ops = {
25772 .inherits = &amd_base_port_ops,
25773 .cable_detect = ata_cable_40wire,
25774 .set_piomode = amd33_set_piomode,
25775 .set_dmamode = amd33_set_dmamode,
25776 };
25777
25778-static struct ata_port_operations amd66_port_ops = {
25779+static const struct ata_port_operations amd66_port_ops = {
25780 .inherits = &amd_base_port_ops,
25781 .cable_detect = ata_cable_unknown,
25782 .set_piomode = amd66_set_piomode,
25783 .set_dmamode = amd66_set_dmamode,
25784 };
25785
25786-static struct ata_port_operations amd100_port_ops = {
25787+static const struct ata_port_operations amd100_port_ops = {
25788 .inherits = &amd_base_port_ops,
25789 .cable_detect = ata_cable_unknown,
25790 .set_piomode = amd100_set_piomode,
25791 .set_dmamode = amd100_set_dmamode,
25792 };
25793
25794-static struct ata_port_operations amd133_port_ops = {
25795+static const struct ata_port_operations amd133_port_ops = {
25796 .inherits = &amd_base_port_ops,
25797 .cable_detect = amd_cable_detect,
25798 .set_piomode = amd133_set_piomode,
25799@@ -433,13 +433,13 @@ static const struct ata_port_operations
25800 .host_stop = nv_host_stop,
25801 };
25802
25803-static struct ata_port_operations nv100_port_ops = {
25804+static const struct ata_port_operations nv100_port_ops = {
25805 .inherits = &nv_base_port_ops,
25806 .set_piomode = nv100_set_piomode,
25807 .set_dmamode = nv100_set_dmamode,
25808 };
25809
25810-static struct ata_port_operations nv133_port_ops = {
25811+static const struct ata_port_operations nv133_port_ops = {
25812 .inherits = &nv_base_port_ops,
25813 .set_piomode = nv133_set_piomode,
25814 .set_dmamode = nv133_set_dmamode,
25815diff -urNp linux-2.6.32.48/drivers/ata/pata_artop.c linux-2.6.32.48/drivers/ata/pata_artop.c
25816--- linux-2.6.32.48/drivers/ata/pata_artop.c 2011-11-08 19:02:43.000000000 -0500
25817+++ linux-2.6.32.48/drivers/ata/pata_artop.c 2011-11-15 19:59:43.000000000 -0500
25818@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
25819 ATA_BMDMA_SHT(DRV_NAME),
25820 };
25821
25822-static struct ata_port_operations artop6210_ops = {
25823+static const struct ata_port_operations artop6210_ops = {
25824 .inherits = &ata_bmdma_port_ops,
25825 .cable_detect = ata_cable_40wire,
25826 .set_piomode = artop6210_set_piomode,
25827@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
25828 .qc_defer = artop6210_qc_defer,
25829 };
25830
25831-static struct ata_port_operations artop6260_ops = {
25832+static const struct ata_port_operations artop6260_ops = {
25833 .inherits = &ata_bmdma_port_ops,
25834 .cable_detect = artop6260_cable_detect,
25835 .set_piomode = artop6260_set_piomode,
25836diff -urNp linux-2.6.32.48/drivers/ata/pata_at32.c linux-2.6.32.48/drivers/ata/pata_at32.c
25837--- linux-2.6.32.48/drivers/ata/pata_at32.c 2011-11-08 19:02:43.000000000 -0500
25838+++ linux-2.6.32.48/drivers/ata/pata_at32.c 2011-11-15 19:59:43.000000000 -0500
25839@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
25840 ATA_PIO_SHT(DRV_NAME),
25841 };
25842
25843-static struct ata_port_operations at32_port_ops = {
25844+static const struct ata_port_operations at32_port_ops = {
25845 .inherits = &ata_sff_port_ops,
25846 .cable_detect = ata_cable_40wire,
25847 .set_piomode = pata_at32_set_piomode,
25848diff -urNp linux-2.6.32.48/drivers/ata/pata_at91.c linux-2.6.32.48/drivers/ata/pata_at91.c
25849--- linux-2.6.32.48/drivers/ata/pata_at91.c 2011-11-08 19:02:43.000000000 -0500
25850+++ linux-2.6.32.48/drivers/ata/pata_at91.c 2011-11-15 19:59:43.000000000 -0500
25851@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
25852 ATA_PIO_SHT(DRV_NAME),
25853 };
25854
25855-static struct ata_port_operations pata_at91_port_ops = {
25856+static const struct ata_port_operations pata_at91_port_ops = {
25857 .inherits = &ata_sff_port_ops,
25858
25859 .sff_data_xfer = pata_at91_data_xfer_noirq,
25860diff -urNp linux-2.6.32.48/drivers/ata/pata_atiixp.c linux-2.6.32.48/drivers/ata/pata_atiixp.c
25861--- linux-2.6.32.48/drivers/ata/pata_atiixp.c 2011-11-08 19:02:43.000000000 -0500
25862+++ linux-2.6.32.48/drivers/ata/pata_atiixp.c 2011-11-15 19:59:43.000000000 -0500
25863@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
25864 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25865 };
25866
25867-static struct ata_port_operations atiixp_port_ops = {
25868+static const struct ata_port_operations atiixp_port_ops = {
25869 .inherits = &ata_bmdma_port_ops,
25870
25871 .qc_prep = ata_sff_dumb_qc_prep,
25872diff -urNp linux-2.6.32.48/drivers/ata/pata_atp867x.c linux-2.6.32.48/drivers/ata/pata_atp867x.c
25873--- linux-2.6.32.48/drivers/ata/pata_atp867x.c 2011-11-08 19:02:43.000000000 -0500
25874+++ linux-2.6.32.48/drivers/ata/pata_atp867x.c 2011-11-15 19:59:43.000000000 -0500
25875@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25876 ATA_BMDMA_SHT(DRV_NAME),
25877 };
25878
25879-static struct ata_port_operations atp867x_ops = {
25880+static const struct ata_port_operations atp867x_ops = {
25881 .inherits = &ata_bmdma_port_ops,
25882 .cable_detect = atp867x_cable_detect,
25883 .set_piomode = atp867x_set_piomode,
25884diff -urNp linux-2.6.32.48/drivers/ata/pata_bf54x.c linux-2.6.32.48/drivers/ata/pata_bf54x.c
25885--- linux-2.6.32.48/drivers/ata/pata_bf54x.c 2011-11-08 19:02:43.000000000 -0500
25886+++ linux-2.6.32.48/drivers/ata/pata_bf54x.c 2011-11-15 19:59:43.000000000 -0500
25887@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25888 .dma_boundary = ATA_DMA_BOUNDARY,
25889 };
25890
25891-static struct ata_port_operations bfin_pata_ops = {
25892+static const struct ata_port_operations bfin_pata_ops = {
25893 .inherits = &ata_sff_port_ops,
25894
25895 .set_piomode = bfin_set_piomode,
25896diff -urNp linux-2.6.32.48/drivers/ata/pata_cmd640.c linux-2.6.32.48/drivers/ata/pata_cmd640.c
25897--- linux-2.6.32.48/drivers/ata/pata_cmd640.c 2011-11-08 19:02:43.000000000 -0500
25898+++ linux-2.6.32.48/drivers/ata/pata_cmd640.c 2011-11-15 19:59:43.000000000 -0500
25899@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25900 ATA_BMDMA_SHT(DRV_NAME),
25901 };
25902
25903-static struct ata_port_operations cmd640_port_ops = {
25904+static const struct ata_port_operations cmd640_port_ops = {
25905 .inherits = &ata_bmdma_port_ops,
25906 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25907 .sff_data_xfer = ata_sff_data_xfer_noirq,
25908diff -urNp linux-2.6.32.48/drivers/ata/pata_cmd64x.c linux-2.6.32.48/drivers/ata/pata_cmd64x.c
25909--- linux-2.6.32.48/drivers/ata/pata_cmd64x.c 2011-11-08 19:02:43.000000000 -0500
25910+++ linux-2.6.32.48/drivers/ata/pata_cmd64x.c 2011-11-15 19:59:43.000000000 -0500
25911@@ -271,18 +271,18 @@ static const struct ata_port_operations
25912 .set_dmamode = cmd64x_set_dmamode,
25913 };
25914
25915-static struct ata_port_operations cmd64x_port_ops = {
25916+static const struct ata_port_operations cmd64x_port_ops = {
25917 .inherits = &cmd64x_base_ops,
25918 .cable_detect = ata_cable_40wire,
25919 };
25920
25921-static struct ata_port_operations cmd646r1_port_ops = {
25922+static const struct ata_port_operations cmd646r1_port_ops = {
25923 .inherits = &cmd64x_base_ops,
25924 .bmdma_stop = cmd646r1_bmdma_stop,
25925 .cable_detect = ata_cable_40wire,
25926 };
25927
25928-static struct ata_port_operations cmd648_port_ops = {
25929+static const struct ata_port_operations cmd648_port_ops = {
25930 .inherits = &cmd64x_base_ops,
25931 .bmdma_stop = cmd648_bmdma_stop,
25932 .cable_detect = cmd648_cable_detect,
25933diff -urNp linux-2.6.32.48/drivers/ata/pata_cs5520.c linux-2.6.32.48/drivers/ata/pata_cs5520.c
25934--- linux-2.6.32.48/drivers/ata/pata_cs5520.c 2011-11-08 19:02:43.000000000 -0500
25935+++ linux-2.6.32.48/drivers/ata/pata_cs5520.c 2011-11-15 19:59:43.000000000 -0500
25936@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25937 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25938 };
25939
25940-static struct ata_port_operations cs5520_port_ops = {
25941+static const struct ata_port_operations cs5520_port_ops = {
25942 .inherits = &ata_bmdma_port_ops,
25943 .qc_prep = ata_sff_dumb_qc_prep,
25944 .cable_detect = ata_cable_40wire,
25945diff -urNp linux-2.6.32.48/drivers/ata/pata_cs5530.c linux-2.6.32.48/drivers/ata/pata_cs5530.c
25946--- linux-2.6.32.48/drivers/ata/pata_cs5530.c 2011-11-08 19:02:43.000000000 -0500
25947+++ linux-2.6.32.48/drivers/ata/pata_cs5530.c 2011-11-15 19:59:43.000000000 -0500
25948@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25949 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25950 };
25951
25952-static struct ata_port_operations cs5530_port_ops = {
25953+static const struct ata_port_operations cs5530_port_ops = {
25954 .inherits = &ata_bmdma_port_ops,
25955
25956 .qc_prep = ata_sff_dumb_qc_prep,
25957diff -urNp linux-2.6.32.48/drivers/ata/pata_cs5535.c linux-2.6.32.48/drivers/ata/pata_cs5535.c
25958--- linux-2.6.32.48/drivers/ata/pata_cs5535.c 2011-11-08 19:02:43.000000000 -0500
25959+++ linux-2.6.32.48/drivers/ata/pata_cs5535.c 2011-11-15 19:59:43.000000000 -0500
25960@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25961 ATA_BMDMA_SHT(DRV_NAME),
25962 };
25963
25964-static struct ata_port_operations cs5535_port_ops = {
25965+static const struct ata_port_operations cs5535_port_ops = {
25966 .inherits = &ata_bmdma_port_ops,
25967 .cable_detect = cs5535_cable_detect,
25968 .set_piomode = cs5535_set_piomode,
25969diff -urNp linux-2.6.32.48/drivers/ata/pata_cs5536.c linux-2.6.32.48/drivers/ata/pata_cs5536.c
25970--- linux-2.6.32.48/drivers/ata/pata_cs5536.c 2011-11-08 19:02:43.000000000 -0500
25971+++ linux-2.6.32.48/drivers/ata/pata_cs5536.c 2011-11-15 19:59:43.000000000 -0500
25972@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25973 ATA_BMDMA_SHT(DRV_NAME),
25974 };
25975
25976-static struct ata_port_operations cs5536_port_ops = {
25977+static const struct ata_port_operations cs5536_port_ops = {
25978 .inherits = &ata_bmdma_port_ops,
25979 .cable_detect = cs5536_cable_detect,
25980 .set_piomode = cs5536_set_piomode,
25981diff -urNp linux-2.6.32.48/drivers/ata/pata_cypress.c linux-2.6.32.48/drivers/ata/pata_cypress.c
25982--- linux-2.6.32.48/drivers/ata/pata_cypress.c 2011-11-08 19:02:43.000000000 -0500
25983+++ linux-2.6.32.48/drivers/ata/pata_cypress.c 2011-11-15 19:59:43.000000000 -0500
25984@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25985 ATA_BMDMA_SHT(DRV_NAME),
25986 };
25987
25988-static struct ata_port_operations cy82c693_port_ops = {
25989+static const struct ata_port_operations cy82c693_port_ops = {
25990 .inherits = &ata_bmdma_port_ops,
25991 .cable_detect = ata_cable_40wire,
25992 .set_piomode = cy82c693_set_piomode,
25993diff -urNp linux-2.6.32.48/drivers/ata/pata_efar.c linux-2.6.32.48/drivers/ata/pata_efar.c
25994--- linux-2.6.32.48/drivers/ata/pata_efar.c 2011-11-08 19:02:43.000000000 -0500
25995+++ linux-2.6.32.48/drivers/ata/pata_efar.c 2011-11-15 19:59:43.000000000 -0500
25996@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25997 ATA_BMDMA_SHT(DRV_NAME),
25998 };
25999
26000-static struct ata_port_operations efar_ops = {
26001+static const struct ata_port_operations efar_ops = {
26002 .inherits = &ata_bmdma_port_ops,
26003 .cable_detect = efar_cable_detect,
26004 .set_piomode = efar_set_piomode,
26005diff -urNp linux-2.6.32.48/drivers/ata/pata_hpt366.c linux-2.6.32.48/drivers/ata/pata_hpt366.c
26006--- linux-2.6.32.48/drivers/ata/pata_hpt366.c 2011-11-08 19:02:43.000000000 -0500
26007+++ linux-2.6.32.48/drivers/ata/pata_hpt366.c 2011-11-15 19:59:43.000000000 -0500
26008@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
26009 * Configuration for HPT366/68
26010 */
26011
26012-static struct ata_port_operations hpt366_port_ops = {
26013+static const struct ata_port_operations hpt366_port_ops = {
26014 .inherits = &ata_bmdma_port_ops,
26015 .cable_detect = hpt36x_cable_detect,
26016 .mode_filter = hpt366_filter,
26017diff -urNp linux-2.6.32.48/drivers/ata/pata_hpt37x.c linux-2.6.32.48/drivers/ata/pata_hpt37x.c
26018--- linux-2.6.32.48/drivers/ata/pata_hpt37x.c 2011-11-08 19:02:43.000000000 -0500
26019+++ linux-2.6.32.48/drivers/ata/pata_hpt37x.c 2011-11-15 19:59:43.000000000 -0500
26020@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
26021 * Configuration for HPT370
26022 */
26023
26024-static struct ata_port_operations hpt370_port_ops = {
26025+static const struct ata_port_operations hpt370_port_ops = {
26026 .inherits = &ata_bmdma_port_ops,
26027
26028 .bmdma_stop = hpt370_bmdma_stop,
26029@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
26030 * Configuration for HPT370A. Close to 370 but less filters
26031 */
26032
26033-static struct ata_port_operations hpt370a_port_ops = {
26034+static const struct ata_port_operations hpt370a_port_ops = {
26035 .inherits = &hpt370_port_ops,
26036 .mode_filter = hpt370a_filter,
26037 };
26038@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
26039 * and DMA mode setting functionality.
26040 */
26041
26042-static struct ata_port_operations hpt372_port_ops = {
26043+static const struct ata_port_operations hpt372_port_ops = {
26044 .inherits = &ata_bmdma_port_ops,
26045
26046 .bmdma_stop = hpt37x_bmdma_stop,
26047@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
26048 * but we have a different cable detection procedure for function 1.
26049 */
26050
26051-static struct ata_port_operations hpt374_fn1_port_ops = {
26052+static const struct ata_port_operations hpt374_fn1_port_ops = {
26053 .inherits = &hpt372_port_ops,
26054 .prereset = hpt374_fn1_pre_reset,
26055 };
26056diff -urNp linux-2.6.32.48/drivers/ata/pata_hpt3x2n.c linux-2.6.32.48/drivers/ata/pata_hpt3x2n.c
26057--- linux-2.6.32.48/drivers/ata/pata_hpt3x2n.c 2011-11-08 19:02:43.000000000 -0500
26058+++ linux-2.6.32.48/drivers/ata/pata_hpt3x2n.c 2011-11-15 19:59:43.000000000 -0500
26059@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
26060 * Configuration for HPT3x2n.
26061 */
26062
26063-static struct ata_port_operations hpt3x2n_port_ops = {
26064+static const struct ata_port_operations hpt3x2n_port_ops = {
26065 .inherits = &ata_bmdma_port_ops,
26066
26067 .bmdma_stop = hpt3x2n_bmdma_stop,
26068diff -urNp linux-2.6.32.48/drivers/ata/pata_hpt3x3.c linux-2.6.32.48/drivers/ata/pata_hpt3x3.c
26069--- linux-2.6.32.48/drivers/ata/pata_hpt3x3.c 2011-11-08 19:02:43.000000000 -0500
26070+++ linux-2.6.32.48/drivers/ata/pata_hpt3x3.c 2011-11-15 19:59:43.000000000 -0500
26071@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
26072 ATA_BMDMA_SHT(DRV_NAME),
26073 };
26074
26075-static struct ata_port_operations hpt3x3_port_ops = {
26076+static const struct ata_port_operations hpt3x3_port_ops = {
26077 .inherits = &ata_bmdma_port_ops,
26078 .cable_detect = ata_cable_40wire,
26079 .set_piomode = hpt3x3_set_piomode,
26080diff -urNp linux-2.6.32.48/drivers/ata/pata_icside.c linux-2.6.32.48/drivers/ata/pata_icside.c
26081--- linux-2.6.32.48/drivers/ata/pata_icside.c 2011-11-08 19:02:43.000000000 -0500
26082+++ linux-2.6.32.48/drivers/ata/pata_icside.c 2011-11-15 19:59:43.000000000 -0500
26083@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
26084 }
26085 }
26086
26087-static struct ata_port_operations pata_icside_port_ops = {
26088+static const struct ata_port_operations pata_icside_port_ops = {
26089 .inherits = &ata_sff_port_ops,
26090 /* no need to build any PRD tables for DMA */
26091 .qc_prep = ata_noop_qc_prep,
26092diff -urNp linux-2.6.32.48/drivers/ata/pata_isapnp.c linux-2.6.32.48/drivers/ata/pata_isapnp.c
26093--- linux-2.6.32.48/drivers/ata/pata_isapnp.c 2011-11-08 19:02:43.000000000 -0500
26094+++ linux-2.6.32.48/drivers/ata/pata_isapnp.c 2011-11-15 19:59:43.000000000 -0500
26095@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
26096 ATA_PIO_SHT(DRV_NAME),
26097 };
26098
26099-static struct ata_port_operations isapnp_port_ops = {
26100+static const struct ata_port_operations isapnp_port_ops = {
26101 .inherits = &ata_sff_port_ops,
26102 .cable_detect = ata_cable_40wire,
26103 };
26104
26105-static struct ata_port_operations isapnp_noalt_port_ops = {
26106+static const struct ata_port_operations isapnp_noalt_port_ops = {
26107 .inherits = &ata_sff_port_ops,
26108 .cable_detect = ata_cable_40wire,
26109 /* No altstatus so we don't want to use the lost interrupt poll */
26110diff -urNp linux-2.6.32.48/drivers/ata/pata_it8213.c linux-2.6.32.48/drivers/ata/pata_it8213.c
26111--- linux-2.6.32.48/drivers/ata/pata_it8213.c 2011-11-08 19:02:43.000000000 -0500
26112+++ linux-2.6.32.48/drivers/ata/pata_it8213.c 2011-11-15 19:59:43.000000000 -0500
26113@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
26114 };
26115
26116
26117-static struct ata_port_operations it8213_ops = {
26118+static const struct ata_port_operations it8213_ops = {
26119 .inherits = &ata_bmdma_port_ops,
26120 .cable_detect = it8213_cable_detect,
26121 .set_piomode = it8213_set_piomode,
26122diff -urNp linux-2.6.32.48/drivers/ata/pata_it821x.c linux-2.6.32.48/drivers/ata/pata_it821x.c
26123--- linux-2.6.32.48/drivers/ata/pata_it821x.c 2011-11-08 19:02:43.000000000 -0500
26124+++ linux-2.6.32.48/drivers/ata/pata_it821x.c 2011-11-15 19:59:43.000000000 -0500
26125@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
26126 ATA_BMDMA_SHT(DRV_NAME),
26127 };
26128
26129-static struct ata_port_operations it821x_smart_port_ops = {
26130+static const struct ata_port_operations it821x_smart_port_ops = {
26131 .inherits = &ata_bmdma_port_ops,
26132
26133 .check_atapi_dma= it821x_check_atapi_dma,
26134@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
26135 .port_start = it821x_port_start,
26136 };
26137
26138-static struct ata_port_operations it821x_passthru_port_ops = {
26139+static const struct ata_port_operations it821x_passthru_port_ops = {
26140 .inherits = &ata_bmdma_port_ops,
26141
26142 .check_atapi_dma= it821x_check_atapi_dma,
26143@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
26144 .port_start = it821x_port_start,
26145 };
26146
26147-static struct ata_port_operations it821x_rdc_port_ops = {
26148+static const struct ata_port_operations it821x_rdc_port_ops = {
26149 .inherits = &ata_bmdma_port_ops,
26150
26151 .check_atapi_dma= it821x_check_atapi_dma,
26152diff -urNp linux-2.6.32.48/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.48/drivers/ata/pata_ixp4xx_cf.c
26153--- linux-2.6.32.48/drivers/ata/pata_ixp4xx_cf.c 2011-11-08 19:02:43.000000000 -0500
26154+++ linux-2.6.32.48/drivers/ata/pata_ixp4xx_cf.c 2011-11-15 19:59:43.000000000 -0500
26155@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
26156 ATA_PIO_SHT(DRV_NAME),
26157 };
26158
26159-static struct ata_port_operations ixp4xx_port_ops = {
26160+static const struct ata_port_operations ixp4xx_port_ops = {
26161 .inherits = &ata_sff_port_ops,
26162 .sff_data_xfer = ixp4xx_mmio_data_xfer,
26163 .cable_detect = ata_cable_40wire,
26164diff -urNp linux-2.6.32.48/drivers/ata/pata_jmicron.c linux-2.6.32.48/drivers/ata/pata_jmicron.c
26165--- linux-2.6.32.48/drivers/ata/pata_jmicron.c 2011-11-08 19:02:43.000000000 -0500
26166+++ linux-2.6.32.48/drivers/ata/pata_jmicron.c 2011-11-15 19:59:43.000000000 -0500
26167@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
26168 ATA_BMDMA_SHT(DRV_NAME),
26169 };
26170
26171-static struct ata_port_operations jmicron_ops = {
26172+static const struct ata_port_operations jmicron_ops = {
26173 .inherits = &ata_bmdma_port_ops,
26174 .prereset = jmicron_pre_reset,
26175 };
26176diff -urNp linux-2.6.32.48/drivers/ata/pata_legacy.c linux-2.6.32.48/drivers/ata/pata_legacy.c
26177--- linux-2.6.32.48/drivers/ata/pata_legacy.c 2011-11-08 19:02:43.000000000 -0500
26178+++ linux-2.6.32.48/drivers/ata/pata_legacy.c 2011-11-15 19:59:43.000000000 -0500
26179@@ -106,7 +106,7 @@ struct legacy_probe {
26180
26181 struct legacy_controller {
26182 const char *name;
26183- struct ata_port_operations *ops;
26184+ const struct ata_port_operations *ops;
26185 unsigned int pio_mask;
26186 unsigned int flags;
26187 unsigned int pflags;
26188@@ -223,12 +223,12 @@ static const struct ata_port_operations
26189 * pio_mask as well.
26190 */
26191
26192-static struct ata_port_operations simple_port_ops = {
26193+static const struct ata_port_operations simple_port_ops = {
26194 .inherits = &legacy_base_port_ops,
26195 .sff_data_xfer = ata_sff_data_xfer_noirq,
26196 };
26197
26198-static struct ata_port_operations legacy_port_ops = {
26199+static const struct ata_port_operations legacy_port_ops = {
26200 .inherits = &legacy_base_port_ops,
26201 .sff_data_xfer = ata_sff_data_xfer_noirq,
26202 .set_mode = legacy_set_mode,
26203@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
26204 return buflen;
26205 }
26206
26207-static struct ata_port_operations pdc20230_port_ops = {
26208+static const struct ata_port_operations pdc20230_port_ops = {
26209 .inherits = &legacy_base_port_ops,
26210 .set_piomode = pdc20230_set_piomode,
26211 .sff_data_xfer = pdc_data_xfer_vlb,
26212@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
26213 ioread8(ap->ioaddr.status_addr);
26214 }
26215
26216-static struct ata_port_operations ht6560a_port_ops = {
26217+static const struct ata_port_operations ht6560a_port_ops = {
26218 .inherits = &legacy_base_port_ops,
26219 .set_piomode = ht6560a_set_piomode,
26220 };
26221@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
26222 ioread8(ap->ioaddr.status_addr);
26223 }
26224
26225-static struct ata_port_operations ht6560b_port_ops = {
26226+static const struct ata_port_operations ht6560b_port_ops = {
26227 .inherits = &legacy_base_port_ops,
26228 .set_piomode = ht6560b_set_piomode,
26229 };
26230@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
26231 }
26232
26233
26234-static struct ata_port_operations opti82c611a_port_ops = {
26235+static const struct ata_port_operations opti82c611a_port_ops = {
26236 .inherits = &legacy_base_port_ops,
26237 .set_piomode = opti82c611a_set_piomode,
26238 };
26239@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
26240 return ata_sff_qc_issue(qc);
26241 }
26242
26243-static struct ata_port_operations opti82c46x_port_ops = {
26244+static const struct ata_port_operations opti82c46x_port_ops = {
26245 .inherits = &legacy_base_port_ops,
26246 .set_piomode = opti82c46x_set_piomode,
26247 .qc_issue = opti82c46x_qc_issue,
26248@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
26249 return 0;
26250 }
26251
26252-static struct ata_port_operations qdi6500_port_ops = {
26253+static const struct ata_port_operations qdi6500_port_ops = {
26254 .inherits = &legacy_base_port_ops,
26255 .set_piomode = qdi6500_set_piomode,
26256 .qc_issue = qdi_qc_issue,
26257 .sff_data_xfer = vlb32_data_xfer,
26258 };
26259
26260-static struct ata_port_operations qdi6580_port_ops = {
26261+static const struct ata_port_operations qdi6580_port_ops = {
26262 .inherits = &legacy_base_port_ops,
26263 .set_piomode = qdi6580_set_piomode,
26264 .sff_data_xfer = vlb32_data_xfer,
26265 };
26266
26267-static struct ata_port_operations qdi6580dp_port_ops = {
26268+static const struct ata_port_operations qdi6580dp_port_ops = {
26269 .inherits = &legacy_base_port_ops,
26270 .set_piomode = qdi6580dp_set_piomode,
26271 .sff_data_xfer = vlb32_data_xfer,
26272@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
26273 return 0;
26274 }
26275
26276-static struct ata_port_operations winbond_port_ops = {
26277+static const struct ata_port_operations winbond_port_ops = {
26278 .inherits = &legacy_base_port_ops,
26279 .set_piomode = winbond_set_piomode,
26280 .sff_data_xfer = vlb32_data_xfer,
26281@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
26282 int pio_modes = controller->pio_mask;
26283 unsigned long io = probe->port;
26284 u32 mask = (1 << probe->slot);
26285- struct ata_port_operations *ops = controller->ops;
26286+ const struct ata_port_operations *ops = controller->ops;
26287 struct legacy_data *ld = &legacy_data[probe->slot];
26288 struct ata_host *host = NULL;
26289 struct ata_port *ap;
26290diff -urNp linux-2.6.32.48/drivers/ata/pata_marvell.c linux-2.6.32.48/drivers/ata/pata_marvell.c
26291--- linux-2.6.32.48/drivers/ata/pata_marvell.c 2011-11-08 19:02:43.000000000 -0500
26292+++ linux-2.6.32.48/drivers/ata/pata_marvell.c 2011-11-15 19:59:43.000000000 -0500
26293@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
26294 ATA_BMDMA_SHT(DRV_NAME),
26295 };
26296
26297-static struct ata_port_operations marvell_ops = {
26298+static const struct ata_port_operations marvell_ops = {
26299 .inherits = &ata_bmdma_port_ops,
26300 .cable_detect = marvell_cable_detect,
26301 .prereset = marvell_pre_reset,
26302diff -urNp linux-2.6.32.48/drivers/ata/pata_mpc52xx.c linux-2.6.32.48/drivers/ata/pata_mpc52xx.c
26303--- linux-2.6.32.48/drivers/ata/pata_mpc52xx.c 2011-11-08 19:02:43.000000000 -0500
26304+++ linux-2.6.32.48/drivers/ata/pata_mpc52xx.c 2011-11-15 19:59:43.000000000 -0500
26305@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
26306 ATA_PIO_SHT(DRV_NAME),
26307 };
26308
26309-static struct ata_port_operations mpc52xx_ata_port_ops = {
26310+static const struct ata_port_operations mpc52xx_ata_port_ops = {
26311 .inherits = &ata_bmdma_port_ops,
26312 .sff_dev_select = mpc52xx_ata_dev_select,
26313 .set_piomode = mpc52xx_ata_set_piomode,
26314diff -urNp linux-2.6.32.48/drivers/ata/pata_mpiix.c linux-2.6.32.48/drivers/ata/pata_mpiix.c
26315--- linux-2.6.32.48/drivers/ata/pata_mpiix.c 2011-11-08 19:02:43.000000000 -0500
26316+++ linux-2.6.32.48/drivers/ata/pata_mpiix.c 2011-11-15 19:59:43.000000000 -0500
26317@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
26318 ATA_PIO_SHT(DRV_NAME),
26319 };
26320
26321-static struct ata_port_operations mpiix_port_ops = {
26322+static const struct ata_port_operations mpiix_port_ops = {
26323 .inherits = &ata_sff_port_ops,
26324 .qc_issue = mpiix_qc_issue,
26325 .cable_detect = ata_cable_40wire,
26326diff -urNp linux-2.6.32.48/drivers/ata/pata_netcell.c linux-2.6.32.48/drivers/ata/pata_netcell.c
26327--- linux-2.6.32.48/drivers/ata/pata_netcell.c 2011-11-08 19:02:43.000000000 -0500
26328+++ linux-2.6.32.48/drivers/ata/pata_netcell.c 2011-11-15 19:59:43.000000000 -0500
26329@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
26330 ATA_BMDMA_SHT(DRV_NAME),
26331 };
26332
26333-static struct ata_port_operations netcell_ops = {
26334+static const struct ata_port_operations netcell_ops = {
26335 .inherits = &ata_bmdma_port_ops,
26336 .cable_detect = ata_cable_80wire,
26337 .read_id = netcell_read_id,
26338diff -urNp linux-2.6.32.48/drivers/ata/pata_ninja32.c linux-2.6.32.48/drivers/ata/pata_ninja32.c
26339--- linux-2.6.32.48/drivers/ata/pata_ninja32.c 2011-11-08 19:02:43.000000000 -0500
26340+++ linux-2.6.32.48/drivers/ata/pata_ninja32.c 2011-11-15 19:59:43.000000000 -0500
26341@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
26342 ATA_BMDMA_SHT(DRV_NAME),
26343 };
26344
26345-static struct ata_port_operations ninja32_port_ops = {
26346+static const struct ata_port_operations ninja32_port_ops = {
26347 .inherits = &ata_bmdma_port_ops,
26348 .sff_dev_select = ninja32_dev_select,
26349 .cable_detect = ata_cable_40wire,
26350diff -urNp linux-2.6.32.48/drivers/ata/pata_ns87410.c linux-2.6.32.48/drivers/ata/pata_ns87410.c
26351--- linux-2.6.32.48/drivers/ata/pata_ns87410.c 2011-11-08 19:02:43.000000000 -0500
26352+++ linux-2.6.32.48/drivers/ata/pata_ns87410.c 2011-11-15 19:59:43.000000000 -0500
26353@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
26354 ATA_PIO_SHT(DRV_NAME),
26355 };
26356
26357-static struct ata_port_operations ns87410_port_ops = {
26358+static const struct ata_port_operations ns87410_port_ops = {
26359 .inherits = &ata_sff_port_ops,
26360 .qc_issue = ns87410_qc_issue,
26361 .cable_detect = ata_cable_40wire,
26362diff -urNp linux-2.6.32.48/drivers/ata/pata_ns87415.c linux-2.6.32.48/drivers/ata/pata_ns87415.c
26363--- linux-2.6.32.48/drivers/ata/pata_ns87415.c 2011-11-08 19:02:43.000000000 -0500
26364+++ linux-2.6.32.48/drivers/ata/pata_ns87415.c 2011-11-15 19:59:43.000000000 -0500
26365@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
26366 }
26367 #endif /* 87560 SuperIO Support */
26368
26369-static struct ata_port_operations ns87415_pata_ops = {
26370+static const struct ata_port_operations ns87415_pata_ops = {
26371 .inherits = &ata_bmdma_port_ops,
26372
26373 .check_atapi_dma = ns87415_check_atapi_dma,
26374@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
26375 };
26376
26377 #if defined(CONFIG_SUPERIO)
26378-static struct ata_port_operations ns87560_pata_ops = {
26379+static const struct ata_port_operations ns87560_pata_ops = {
26380 .inherits = &ns87415_pata_ops,
26381 .sff_tf_read = ns87560_tf_read,
26382 .sff_check_status = ns87560_check_status,
26383diff -urNp linux-2.6.32.48/drivers/ata/pata_octeon_cf.c linux-2.6.32.48/drivers/ata/pata_octeon_cf.c
26384--- linux-2.6.32.48/drivers/ata/pata_octeon_cf.c 2011-11-08 19:02:43.000000000 -0500
26385+++ linux-2.6.32.48/drivers/ata/pata_octeon_cf.c 2011-11-15 19:59:43.000000000 -0500
26386@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
26387 return 0;
26388 }
26389
26390+/* cannot be const */
26391 static struct ata_port_operations octeon_cf_ops = {
26392 .inherits = &ata_sff_port_ops,
26393 .check_atapi_dma = octeon_cf_check_atapi_dma,
26394diff -urNp linux-2.6.32.48/drivers/ata/pata_oldpiix.c linux-2.6.32.48/drivers/ata/pata_oldpiix.c
26395--- linux-2.6.32.48/drivers/ata/pata_oldpiix.c 2011-11-08 19:02:43.000000000 -0500
26396+++ linux-2.6.32.48/drivers/ata/pata_oldpiix.c 2011-11-15 19:59:43.000000000 -0500
26397@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
26398 ATA_BMDMA_SHT(DRV_NAME),
26399 };
26400
26401-static struct ata_port_operations oldpiix_pata_ops = {
26402+static const struct ata_port_operations oldpiix_pata_ops = {
26403 .inherits = &ata_bmdma_port_ops,
26404 .qc_issue = oldpiix_qc_issue,
26405 .cable_detect = ata_cable_40wire,
26406diff -urNp linux-2.6.32.48/drivers/ata/pata_opti.c linux-2.6.32.48/drivers/ata/pata_opti.c
26407--- linux-2.6.32.48/drivers/ata/pata_opti.c 2011-11-08 19:02:43.000000000 -0500
26408+++ linux-2.6.32.48/drivers/ata/pata_opti.c 2011-11-15 19:59:43.000000000 -0500
26409@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
26410 ATA_PIO_SHT(DRV_NAME),
26411 };
26412
26413-static struct ata_port_operations opti_port_ops = {
26414+static const struct ata_port_operations opti_port_ops = {
26415 .inherits = &ata_sff_port_ops,
26416 .cable_detect = ata_cable_40wire,
26417 .set_piomode = opti_set_piomode,
26418diff -urNp linux-2.6.32.48/drivers/ata/pata_optidma.c linux-2.6.32.48/drivers/ata/pata_optidma.c
26419--- linux-2.6.32.48/drivers/ata/pata_optidma.c 2011-11-08 19:02:43.000000000 -0500
26420+++ linux-2.6.32.48/drivers/ata/pata_optidma.c 2011-11-15 19:59:43.000000000 -0500
26421@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
26422 ATA_BMDMA_SHT(DRV_NAME),
26423 };
26424
26425-static struct ata_port_operations optidma_port_ops = {
26426+static const struct ata_port_operations optidma_port_ops = {
26427 .inherits = &ata_bmdma_port_ops,
26428 .cable_detect = ata_cable_40wire,
26429 .set_piomode = optidma_set_pio_mode,
26430@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
26431 .prereset = optidma_pre_reset,
26432 };
26433
26434-static struct ata_port_operations optiplus_port_ops = {
26435+static const struct ata_port_operations optiplus_port_ops = {
26436 .inherits = &optidma_port_ops,
26437 .set_piomode = optiplus_set_pio_mode,
26438 .set_dmamode = optiplus_set_dma_mode,
26439diff -urNp linux-2.6.32.48/drivers/ata/pata_palmld.c linux-2.6.32.48/drivers/ata/pata_palmld.c
26440--- linux-2.6.32.48/drivers/ata/pata_palmld.c 2011-11-08 19:02:43.000000000 -0500
26441+++ linux-2.6.32.48/drivers/ata/pata_palmld.c 2011-11-15 19:59:43.000000000 -0500
26442@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
26443 ATA_PIO_SHT(DRV_NAME),
26444 };
26445
26446-static struct ata_port_operations palmld_port_ops = {
26447+static const struct ata_port_operations palmld_port_ops = {
26448 .inherits = &ata_sff_port_ops,
26449 .sff_data_xfer = ata_sff_data_xfer_noirq,
26450 .cable_detect = ata_cable_40wire,
26451diff -urNp linux-2.6.32.48/drivers/ata/pata_pcmcia.c linux-2.6.32.48/drivers/ata/pata_pcmcia.c
26452--- linux-2.6.32.48/drivers/ata/pata_pcmcia.c 2011-11-08 19:02:43.000000000 -0500
26453+++ linux-2.6.32.48/drivers/ata/pata_pcmcia.c 2011-11-15 19:59:43.000000000 -0500
26454@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
26455 ATA_PIO_SHT(DRV_NAME),
26456 };
26457
26458-static struct ata_port_operations pcmcia_port_ops = {
26459+static const struct ata_port_operations pcmcia_port_ops = {
26460 .inherits = &ata_sff_port_ops,
26461 .sff_data_xfer = ata_sff_data_xfer_noirq,
26462 .cable_detect = ata_cable_40wire,
26463 .set_mode = pcmcia_set_mode,
26464 };
26465
26466-static struct ata_port_operations pcmcia_8bit_port_ops = {
26467+static const struct ata_port_operations pcmcia_8bit_port_ops = {
26468 .inherits = &ata_sff_port_ops,
26469 .sff_data_xfer = ata_data_xfer_8bit,
26470 .cable_detect = ata_cable_40wire,
26471@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
26472 unsigned long io_base, ctl_base;
26473 void __iomem *io_addr, *ctl_addr;
26474 int n_ports = 1;
26475- struct ata_port_operations *ops = &pcmcia_port_ops;
26476+ const struct ata_port_operations *ops = &pcmcia_port_ops;
26477
26478 info = kzalloc(sizeof(*info), GFP_KERNEL);
26479 if (info == NULL)
26480diff -urNp linux-2.6.32.48/drivers/ata/pata_pdc2027x.c linux-2.6.32.48/drivers/ata/pata_pdc2027x.c
26481--- linux-2.6.32.48/drivers/ata/pata_pdc2027x.c 2011-11-08 19:02:43.000000000 -0500
26482+++ linux-2.6.32.48/drivers/ata/pata_pdc2027x.c 2011-11-15 19:59:43.000000000 -0500
26483@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
26484 ATA_BMDMA_SHT(DRV_NAME),
26485 };
26486
26487-static struct ata_port_operations pdc2027x_pata100_ops = {
26488+static const struct ata_port_operations pdc2027x_pata100_ops = {
26489 .inherits = &ata_bmdma_port_ops,
26490 .check_atapi_dma = pdc2027x_check_atapi_dma,
26491 .cable_detect = pdc2027x_cable_detect,
26492 .prereset = pdc2027x_prereset,
26493 };
26494
26495-static struct ata_port_operations pdc2027x_pata133_ops = {
26496+static const struct ata_port_operations pdc2027x_pata133_ops = {
26497 .inherits = &pdc2027x_pata100_ops,
26498 .mode_filter = pdc2027x_mode_filter,
26499 .set_piomode = pdc2027x_set_piomode,
26500diff -urNp linux-2.6.32.48/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.48/drivers/ata/pata_pdc202xx_old.c
26501--- linux-2.6.32.48/drivers/ata/pata_pdc202xx_old.c 2011-11-08 19:02:43.000000000 -0500
26502+++ linux-2.6.32.48/drivers/ata/pata_pdc202xx_old.c 2011-11-15 19:59:43.000000000 -0500
26503@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
26504 ATA_BMDMA_SHT(DRV_NAME),
26505 };
26506
26507-static struct ata_port_operations pdc2024x_port_ops = {
26508+static const struct ata_port_operations pdc2024x_port_ops = {
26509 .inherits = &ata_bmdma_port_ops,
26510
26511 .cable_detect = ata_cable_40wire,
26512@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
26513 .sff_exec_command = pdc202xx_exec_command,
26514 };
26515
26516-static struct ata_port_operations pdc2026x_port_ops = {
26517+static const struct ata_port_operations pdc2026x_port_ops = {
26518 .inherits = &pdc2024x_port_ops,
26519
26520 .check_atapi_dma = pdc2026x_check_atapi_dma,
26521diff -urNp linux-2.6.32.48/drivers/ata/pata_platform.c linux-2.6.32.48/drivers/ata/pata_platform.c
26522--- linux-2.6.32.48/drivers/ata/pata_platform.c 2011-11-08 19:02:43.000000000 -0500
26523+++ linux-2.6.32.48/drivers/ata/pata_platform.c 2011-11-15 19:59:43.000000000 -0500
26524@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
26525 ATA_PIO_SHT(DRV_NAME),
26526 };
26527
26528-static struct ata_port_operations pata_platform_port_ops = {
26529+static const struct ata_port_operations pata_platform_port_ops = {
26530 .inherits = &ata_sff_port_ops,
26531 .sff_data_xfer = ata_sff_data_xfer_noirq,
26532 .cable_detect = ata_cable_unknown,
26533diff -urNp linux-2.6.32.48/drivers/ata/pata_qdi.c linux-2.6.32.48/drivers/ata/pata_qdi.c
26534--- linux-2.6.32.48/drivers/ata/pata_qdi.c 2011-11-08 19:02:43.000000000 -0500
26535+++ linux-2.6.32.48/drivers/ata/pata_qdi.c 2011-11-15 19:59:43.000000000 -0500
26536@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
26537 ATA_PIO_SHT(DRV_NAME),
26538 };
26539
26540-static struct ata_port_operations qdi6500_port_ops = {
26541+static const struct ata_port_operations qdi6500_port_ops = {
26542 .inherits = &ata_sff_port_ops,
26543 .qc_issue = qdi_qc_issue,
26544 .sff_data_xfer = qdi_data_xfer,
26545@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
26546 .set_piomode = qdi6500_set_piomode,
26547 };
26548
26549-static struct ata_port_operations qdi6580_port_ops = {
26550+static const struct ata_port_operations qdi6580_port_ops = {
26551 .inherits = &qdi6500_port_ops,
26552 .set_piomode = qdi6580_set_piomode,
26553 };
26554diff -urNp linux-2.6.32.48/drivers/ata/pata_radisys.c linux-2.6.32.48/drivers/ata/pata_radisys.c
26555--- linux-2.6.32.48/drivers/ata/pata_radisys.c 2011-11-08 19:02:43.000000000 -0500
26556+++ linux-2.6.32.48/drivers/ata/pata_radisys.c 2011-11-15 19:59:43.000000000 -0500
26557@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
26558 ATA_BMDMA_SHT(DRV_NAME),
26559 };
26560
26561-static struct ata_port_operations radisys_pata_ops = {
26562+static const struct ata_port_operations radisys_pata_ops = {
26563 .inherits = &ata_bmdma_port_ops,
26564 .qc_issue = radisys_qc_issue,
26565 .cable_detect = ata_cable_unknown,
26566diff -urNp linux-2.6.32.48/drivers/ata/pata_rb532_cf.c linux-2.6.32.48/drivers/ata/pata_rb532_cf.c
26567--- linux-2.6.32.48/drivers/ata/pata_rb532_cf.c 2011-11-08 19:02:43.000000000 -0500
26568+++ linux-2.6.32.48/drivers/ata/pata_rb532_cf.c 2011-11-15 19:59:43.000000000 -0500
26569@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
26570 return IRQ_HANDLED;
26571 }
26572
26573-static struct ata_port_operations rb532_pata_port_ops = {
26574+static const struct ata_port_operations rb532_pata_port_ops = {
26575 .inherits = &ata_sff_port_ops,
26576 .sff_data_xfer = ata_sff_data_xfer32,
26577 };
26578diff -urNp linux-2.6.32.48/drivers/ata/pata_rdc.c linux-2.6.32.48/drivers/ata/pata_rdc.c
26579--- linux-2.6.32.48/drivers/ata/pata_rdc.c 2011-11-08 19:02:43.000000000 -0500
26580+++ linux-2.6.32.48/drivers/ata/pata_rdc.c 2011-11-15 19:59:43.000000000 -0500
26581@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
26582 pci_write_config_byte(dev, 0x48, udma_enable);
26583 }
26584
26585-static struct ata_port_operations rdc_pata_ops = {
26586+static const struct ata_port_operations rdc_pata_ops = {
26587 .inherits = &ata_bmdma32_port_ops,
26588 .cable_detect = rdc_pata_cable_detect,
26589 .set_piomode = rdc_set_piomode,
26590diff -urNp linux-2.6.32.48/drivers/ata/pata_rz1000.c linux-2.6.32.48/drivers/ata/pata_rz1000.c
26591--- linux-2.6.32.48/drivers/ata/pata_rz1000.c 2011-11-08 19:02:43.000000000 -0500
26592+++ linux-2.6.32.48/drivers/ata/pata_rz1000.c 2011-11-15 19:59:43.000000000 -0500
26593@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
26594 ATA_PIO_SHT(DRV_NAME),
26595 };
26596
26597-static struct ata_port_operations rz1000_port_ops = {
26598+static const struct ata_port_operations rz1000_port_ops = {
26599 .inherits = &ata_sff_port_ops,
26600 .cable_detect = ata_cable_40wire,
26601 .set_mode = rz1000_set_mode,
26602diff -urNp linux-2.6.32.48/drivers/ata/pata_sc1200.c linux-2.6.32.48/drivers/ata/pata_sc1200.c
26603--- linux-2.6.32.48/drivers/ata/pata_sc1200.c 2011-11-08 19:02:43.000000000 -0500
26604+++ linux-2.6.32.48/drivers/ata/pata_sc1200.c 2011-11-15 19:59:43.000000000 -0500
26605@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
26606 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
26607 };
26608
26609-static struct ata_port_operations sc1200_port_ops = {
26610+static const struct ata_port_operations sc1200_port_ops = {
26611 .inherits = &ata_bmdma_port_ops,
26612 .qc_prep = ata_sff_dumb_qc_prep,
26613 .qc_issue = sc1200_qc_issue,
26614diff -urNp linux-2.6.32.48/drivers/ata/pata_scc.c linux-2.6.32.48/drivers/ata/pata_scc.c
26615--- linux-2.6.32.48/drivers/ata/pata_scc.c 2011-11-08 19:02:43.000000000 -0500
26616+++ linux-2.6.32.48/drivers/ata/pata_scc.c 2011-11-15 19:59:43.000000000 -0500
26617@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
26618 ATA_BMDMA_SHT(DRV_NAME),
26619 };
26620
26621-static struct ata_port_operations scc_pata_ops = {
26622+static const struct ata_port_operations scc_pata_ops = {
26623 .inherits = &ata_bmdma_port_ops,
26624
26625 .set_piomode = scc_set_piomode,
26626diff -urNp linux-2.6.32.48/drivers/ata/pata_sch.c linux-2.6.32.48/drivers/ata/pata_sch.c
26627--- linux-2.6.32.48/drivers/ata/pata_sch.c 2011-11-08 19:02:43.000000000 -0500
26628+++ linux-2.6.32.48/drivers/ata/pata_sch.c 2011-11-15 19:59:43.000000000 -0500
26629@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
26630 ATA_BMDMA_SHT(DRV_NAME),
26631 };
26632
26633-static struct ata_port_operations sch_pata_ops = {
26634+static const struct ata_port_operations sch_pata_ops = {
26635 .inherits = &ata_bmdma_port_ops,
26636 .cable_detect = ata_cable_unknown,
26637 .set_piomode = sch_set_piomode,
26638diff -urNp linux-2.6.32.48/drivers/ata/pata_serverworks.c linux-2.6.32.48/drivers/ata/pata_serverworks.c
26639--- linux-2.6.32.48/drivers/ata/pata_serverworks.c 2011-11-08 19:02:43.000000000 -0500
26640+++ linux-2.6.32.48/drivers/ata/pata_serverworks.c 2011-11-15 19:59:43.000000000 -0500
26641@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
26642 ATA_BMDMA_SHT(DRV_NAME),
26643 };
26644
26645-static struct ata_port_operations serverworks_osb4_port_ops = {
26646+static const struct ata_port_operations serverworks_osb4_port_ops = {
26647 .inherits = &ata_bmdma_port_ops,
26648 .cable_detect = serverworks_cable_detect,
26649 .mode_filter = serverworks_osb4_filter,
26650@@ -307,7 +307,7 @@ static struct ata_port_operations server
26651 .set_dmamode = serverworks_set_dmamode,
26652 };
26653
26654-static struct ata_port_operations serverworks_csb_port_ops = {
26655+static const struct ata_port_operations serverworks_csb_port_ops = {
26656 .inherits = &serverworks_osb4_port_ops,
26657 .mode_filter = serverworks_csb_filter,
26658 };
26659diff -urNp linux-2.6.32.48/drivers/ata/pata_sil680.c linux-2.6.32.48/drivers/ata/pata_sil680.c
26660--- linux-2.6.32.48/drivers/ata/pata_sil680.c 2011-11-08 19:02:43.000000000 -0500
26661+++ linux-2.6.32.48/drivers/ata/pata_sil680.c 2011-11-15 19:59:43.000000000 -0500
26662@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
26663 ATA_BMDMA_SHT(DRV_NAME),
26664 };
26665
26666-static struct ata_port_operations sil680_port_ops = {
26667+static const struct ata_port_operations sil680_port_ops = {
26668 .inherits = &ata_bmdma32_port_ops,
26669 .cable_detect = sil680_cable_detect,
26670 .set_piomode = sil680_set_piomode,
26671diff -urNp linux-2.6.32.48/drivers/ata/pata_sis.c linux-2.6.32.48/drivers/ata/pata_sis.c
26672--- linux-2.6.32.48/drivers/ata/pata_sis.c 2011-11-08 19:02:43.000000000 -0500
26673+++ linux-2.6.32.48/drivers/ata/pata_sis.c 2011-11-15 19:59:43.000000000 -0500
26674@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
26675 ATA_BMDMA_SHT(DRV_NAME),
26676 };
26677
26678-static struct ata_port_operations sis_133_for_sata_ops = {
26679+static const struct ata_port_operations sis_133_for_sata_ops = {
26680 .inherits = &ata_bmdma_port_ops,
26681 .set_piomode = sis_133_set_piomode,
26682 .set_dmamode = sis_133_set_dmamode,
26683 .cable_detect = sis_133_cable_detect,
26684 };
26685
26686-static struct ata_port_operations sis_base_ops = {
26687+static const struct ata_port_operations sis_base_ops = {
26688 .inherits = &ata_bmdma_port_ops,
26689 .prereset = sis_pre_reset,
26690 };
26691
26692-static struct ata_port_operations sis_133_ops = {
26693+static const struct ata_port_operations sis_133_ops = {
26694 .inherits = &sis_base_ops,
26695 .set_piomode = sis_133_set_piomode,
26696 .set_dmamode = sis_133_set_dmamode,
26697 .cable_detect = sis_133_cable_detect,
26698 };
26699
26700-static struct ata_port_operations sis_133_early_ops = {
26701+static const struct ata_port_operations sis_133_early_ops = {
26702 .inherits = &sis_base_ops,
26703 .set_piomode = sis_100_set_piomode,
26704 .set_dmamode = sis_133_early_set_dmamode,
26705 .cable_detect = sis_66_cable_detect,
26706 };
26707
26708-static struct ata_port_operations sis_100_ops = {
26709+static const struct ata_port_operations sis_100_ops = {
26710 .inherits = &sis_base_ops,
26711 .set_piomode = sis_100_set_piomode,
26712 .set_dmamode = sis_100_set_dmamode,
26713 .cable_detect = sis_66_cable_detect,
26714 };
26715
26716-static struct ata_port_operations sis_66_ops = {
26717+static const struct ata_port_operations sis_66_ops = {
26718 .inherits = &sis_base_ops,
26719 .set_piomode = sis_old_set_piomode,
26720 .set_dmamode = sis_66_set_dmamode,
26721 .cable_detect = sis_66_cable_detect,
26722 };
26723
26724-static struct ata_port_operations sis_old_ops = {
26725+static const struct ata_port_operations sis_old_ops = {
26726 .inherits = &sis_base_ops,
26727 .set_piomode = sis_old_set_piomode,
26728 .set_dmamode = sis_old_set_dmamode,
26729diff -urNp linux-2.6.32.48/drivers/ata/pata_sl82c105.c linux-2.6.32.48/drivers/ata/pata_sl82c105.c
26730--- linux-2.6.32.48/drivers/ata/pata_sl82c105.c 2011-11-08 19:02:43.000000000 -0500
26731+++ linux-2.6.32.48/drivers/ata/pata_sl82c105.c 2011-11-15 19:59:43.000000000 -0500
26732@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
26733 ATA_BMDMA_SHT(DRV_NAME),
26734 };
26735
26736-static struct ata_port_operations sl82c105_port_ops = {
26737+static const struct ata_port_operations sl82c105_port_ops = {
26738 .inherits = &ata_bmdma_port_ops,
26739 .qc_defer = sl82c105_qc_defer,
26740 .bmdma_start = sl82c105_bmdma_start,
26741diff -urNp linux-2.6.32.48/drivers/ata/pata_triflex.c linux-2.6.32.48/drivers/ata/pata_triflex.c
26742--- linux-2.6.32.48/drivers/ata/pata_triflex.c 2011-11-08 19:02:43.000000000 -0500
26743+++ linux-2.6.32.48/drivers/ata/pata_triflex.c 2011-11-15 19:59:43.000000000 -0500
26744@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
26745 ATA_BMDMA_SHT(DRV_NAME),
26746 };
26747
26748-static struct ata_port_operations triflex_port_ops = {
26749+static const struct ata_port_operations triflex_port_ops = {
26750 .inherits = &ata_bmdma_port_ops,
26751 .bmdma_start = triflex_bmdma_start,
26752 .bmdma_stop = triflex_bmdma_stop,
26753diff -urNp linux-2.6.32.48/drivers/ata/pata_via.c linux-2.6.32.48/drivers/ata/pata_via.c
26754--- linux-2.6.32.48/drivers/ata/pata_via.c 2011-11-08 19:02:43.000000000 -0500
26755+++ linux-2.6.32.48/drivers/ata/pata_via.c 2011-11-15 19:59:43.000000000 -0500
26756@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
26757 ATA_BMDMA_SHT(DRV_NAME),
26758 };
26759
26760-static struct ata_port_operations via_port_ops = {
26761+static const struct ata_port_operations via_port_ops = {
26762 .inherits = &ata_bmdma_port_ops,
26763 .cable_detect = via_cable_detect,
26764 .set_piomode = via_set_piomode,
26765@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
26766 .port_start = via_port_start,
26767 };
26768
26769-static struct ata_port_operations via_port_ops_noirq = {
26770+static const struct ata_port_operations via_port_ops_noirq = {
26771 .inherits = &via_port_ops,
26772 .sff_data_xfer = ata_sff_data_xfer_noirq,
26773 };
26774diff -urNp linux-2.6.32.48/drivers/ata/pata_winbond.c linux-2.6.32.48/drivers/ata/pata_winbond.c
26775--- linux-2.6.32.48/drivers/ata/pata_winbond.c 2011-11-08 19:02:43.000000000 -0500
26776+++ linux-2.6.32.48/drivers/ata/pata_winbond.c 2011-11-15 19:59:43.000000000 -0500
26777@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
26778 ATA_PIO_SHT(DRV_NAME),
26779 };
26780
26781-static struct ata_port_operations winbond_port_ops = {
26782+static const struct ata_port_operations winbond_port_ops = {
26783 .inherits = &ata_sff_port_ops,
26784 .sff_data_xfer = winbond_data_xfer,
26785 .cable_detect = ata_cable_40wire,
26786diff -urNp linux-2.6.32.48/drivers/ata/pdc_adma.c linux-2.6.32.48/drivers/ata/pdc_adma.c
26787--- linux-2.6.32.48/drivers/ata/pdc_adma.c 2011-11-08 19:02:43.000000000 -0500
26788+++ linux-2.6.32.48/drivers/ata/pdc_adma.c 2011-11-15 19:59:43.000000000 -0500
26789@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
26790 .dma_boundary = ADMA_DMA_BOUNDARY,
26791 };
26792
26793-static struct ata_port_operations adma_ata_ops = {
26794+static const struct ata_port_operations adma_ata_ops = {
26795 .inherits = &ata_sff_port_ops,
26796
26797 .lost_interrupt = ATA_OP_NULL,
26798diff -urNp linux-2.6.32.48/drivers/ata/sata_fsl.c linux-2.6.32.48/drivers/ata/sata_fsl.c
26799--- linux-2.6.32.48/drivers/ata/sata_fsl.c 2011-11-08 19:02:43.000000000 -0500
26800+++ linux-2.6.32.48/drivers/ata/sata_fsl.c 2011-11-15 19:59:43.000000000 -0500
26801@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
26802 .dma_boundary = ATA_DMA_BOUNDARY,
26803 };
26804
26805-static struct ata_port_operations sata_fsl_ops = {
26806+static const struct ata_port_operations sata_fsl_ops = {
26807 .inherits = &sata_pmp_port_ops,
26808
26809 .qc_defer = ata_std_qc_defer,
26810diff -urNp linux-2.6.32.48/drivers/ata/sata_inic162x.c linux-2.6.32.48/drivers/ata/sata_inic162x.c
26811--- linux-2.6.32.48/drivers/ata/sata_inic162x.c 2011-11-08 19:02:43.000000000 -0500
26812+++ linux-2.6.32.48/drivers/ata/sata_inic162x.c 2011-11-15 19:59:43.000000000 -0500
26813@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
26814 return 0;
26815 }
26816
26817-static struct ata_port_operations inic_port_ops = {
26818+static const struct ata_port_operations inic_port_ops = {
26819 .inherits = &sata_port_ops,
26820
26821 .check_atapi_dma = inic_check_atapi_dma,
26822diff -urNp linux-2.6.32.48/drivers/ata/sata_mv.c linux-2.6.32.48/drivers/ata/sata_mv.c
26823--- linux-2.6.32.48/drivers/ata/sata_mv.c 2011-11-08 19:02:43.000000000 -0500
26824+++ linux-2.6.32.48/drivers/ata/sata_mv.c 2011-11-15 19:59:43.000000000 -0500
26825@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
26826 .dma_boundary = MV_DMA_BOUNDARY,
26827 };
26828
26829-static struct ata_port_operations mv5_ops = {
26830+static const struct ata_port_operations mv5_ops = {
26831 .inherits = &ata_sff_port_ops,
26832
26833 .lost_interrupt = ATA_OP_NULL,
26834@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
26835 .port_stop = mv_port_stop,
26836 };
26837
26838-static struct ata_port_operations mv6_ops = {
26839+static const struct ata_port_operations mv6_ops = {
26840 .inherits = &mv5_ops,
26841 .dev_config = mv6_dev_config,
26842 .scr_read = mv_scr_read,
26843@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
26844 .bmdma_status = mv_bmdma_status,
26845 };
26846
26847-static struct ata_port_operations mv_iie_ops = {
26848+static const struct ata_port_operations mv_iie_ops = {
26849 .inherits = &mv6_ops,
26850 .dev_config = ATA_OP_NULL,
26851 .qc_prep = mv_qc_prep_iie,
26852diff -urNp linux-2.6.32.48/drivers/ata/sata_nv.c linux-2.6.32.48/drivers/ata/sata_nv.c
26853--- linux-2.6.32.48/drivers/ata/sata_nv.c 2011-11-08 19:02:43.000000000 -0500
26854+++ linux-2.6.32.48/drivers/ata/sata_nv.c 2011-11-15 19:59:43.000000000 -0500
26855@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
26856 * cases. Define nv_hardreset() which only kicks in for post-boot
26857 * probing and use it for all variants.
26858 */
26859-static struct ata_port_operations nv_generic_ops = {
26860+static const struct ata_port_operations nv_generic_ops = {
26861 .inherits = &ata_bmdma_port_ops,
26862 .lost_interrupt = ATA_OP_NULL,
26863 .scr_read = nv_scr_read,
26864@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
26865 .hardreset = nv_hardreset,
26866 };
26867
26868-static struct ata_port_operations nv_nf2_ops = {
26869+static const struct ata_port_operations nv_nf2_ops = {
26870 .inherits = &nv_generic_ops,
26871 .freeze = nv_nf2_freeze,
26872 .thaw = nv_nf2_thaw,
26873 };
26874
26875-static struct ata_port_operations nv_ck804_ops = {
26876+static const struct ata_port_operations nv_ck804_ops = {
26877 .inherits = &nv_generic_ops,
26878 .freeze = nv_ck804_freeze,
26879 .thaw = nv_ck804_thaw,
26880 .host_stop = nv_ck804_host_stop,
26881 };
26882
26883-static struct ata_port_operations nv_adma_ops = {
26884+static const struct ata_port_operations nv_adma_ops = {
26885 .inherits = &nv_ck804_ops,
26886
26887 .check_atapi_dma = nv_adma_check_atapi_dma,
26888@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26889 .host_stop = nv_adma_host_stop,
26890 };
26891
26892-static struct ata_port_operations nv_swncq_ops = {
26893+static const struct ata_port_operations nv_swncq_ops = {
26894 .inherits = &nv_generic_ops,
26895
26896 .qc_defer = ata_std_qc_defer,
26897diff -urNp linux-2.6.32.48/drivers/ata/sata_promise.c linux-2.6.32.48/drivers/ata/sata_promise.c
26898--- linux-2.6.32.48/drivers/ata/sata_promise.c 2011-11-08 19:02:43.000000000 -0500
26899+++ linux-2.6.32.48/drivers/ata/sata_promise.c 2011-11-15 19:59:43.000000000 -0500
26900@@ -195,7 +195,7 @@ static const struct ata_port_operations
26901 .error_handler = pdc_error_handler,
26902 };
26903
26904-static struct ata_port_operations pdc_sata_ops = {
26905+static const struct ata_port_operations pdc_sata_ops = {
26906 .inherits = &pdc_common_ops,
26907 .cable_detect = pdc_sata_cable_detect,
26908 .freeze = pdc_sata_freeze,
26909@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26910
26911 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26912 and ->freeze/thaw that ignore the hotplug controls. */
26913-static struct ata_port_operations pdc_old_sata_ops = {
26914+static const struct ata_port_operations pdc_old_sata_ops = {
26915 .inherits = &pdc_sata_ops,
26916 .freeze = pdc_freeze,
26917 .thaw = pdc_thaw,
26918 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26919 };
26920
26921-static struct ata_port_operations pdc_pata_ops = {
26922+static const struct ata_port_operations pdc_pata_ops = {
26923 .inherits = &pdc_common_ops,
26924 .cable_detect = pdc_pata_cable_detect,
26925 .freeze = pdc_freeze,
26926diff -urNp linux-2.6.32.48/drivers/ata/sata_qstor.c linux-2.6.32.48/drivers/ata/sata_qstor.c
26927--- linux-2.6.32.48/drivers/ata/sata_qstor.c 2011-11-08 19:02:43.000000000 -0500
26928+++ linux-2.6.32.48/drivers/ata/sata_qstor.c 2011-11-15 19:59:43.000000000 -0500
26929@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26930 .dma_boundary = QS_DMA_BOUNDARY,
26931 };
26932
26933-static struct ata_port_operations qs_ata_ops = {
26934+static const struct ata_port_operations qs_ata_ops = {
26935 .inherits = &ata_sff_port_ops,
26936
26937 .check_atapi_dma = qs_check_atapi_dma,
26938diff -urNp linux-2.6.32.48/drivers/ata/sata_sil24.c linux-2.6.32.48/drivers/ata/sata_sil24.c
26939--- linux-2.6.32.48/drivers/ata/sata_sil24.c 2011-11-08 19:02:43.000000000 -0500
26940+++ linux-2.6.32.48/drivers/ata/sata_sil24.c 2011-11-15 19:59:43.000000000 -0500
26941@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26942 .dma_boundary = ATA_DMA_BOUNDARY,
26943 };
26944
26945-static struct ata_port_operations sil24_ops = {
26946+static const struct ata_port_operations sil24_ops = {
26947 .inherits = &sata_pmp_port_ops,
26948
26949 .qc_defer = sil24_qc_defer,
26950diff -urNp linux-2.6.32.48/drivers/ata/sata_sil.c linux-2.6.32.48/drivers/ata/sata_sil.c
26951--- linux-2.6.32.48/drivers/ata/sata_sil.c 2011-11-08 19:02:43.000000000 -0500
26952+++ linux-2.6.32.48/drivers/ata/sata_sil.c 2011-11-15 19:59:43.000000000 -0500
26953@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26954 .sg_tablesize = ATA_MAX_PRD
26955 };
26956
26957-static struct ata_port_operations sil_ops = {
26958+static const struct ata_port_operations sil_ops = {
26959 .inherits = &ata_bmdma32_port_ops,
26960 .dev_config = sil_dev_config,
26961 .set_mode = sil_set_mode,
26962diff -urNp linux-2.6.32.48/drivers/ata/sata_sis.c linux-2.6.32.48/drivers/ata/sata_sis.c
26963--- linux-2.6.32.48/drivers/ata/sata_sis.c 2011-11-08 19:02:43.000000000 -0500
26964+++ linux-2.6.32.48/drivers/ata/sata_sis.c 2011-11-15 19:59:43.000000000 -0500
26965@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26966 ATA_BMDMA_SHT(DRV_NAME),
26967 };
26968
26969-static struct ata_port_operations sis_ops = {
26970+static const struct ata_port_operations sis_ops = {
26971 .inherits = &ata_bmdma_port_ops,
26972 .scr_read = sis_scr_read,
26973 .scr_write = sis_scr_write,
26974diff -urNp linux-2.6.32.48/drivers/ata/sata_svw.c linux-2.6.32.48/drivers/ata/sata_svw.c
26975--- linux-2.6.32.48/drivers/ata/sata_svw.c 2011-11-08 19:02:43.000000000 -0500
26976+++ linux-2.6.32.48/drivers/ata/sata_svw.c 2011-11-15 19:59:43.000000000 -0500
26977@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26978 };
26979
26980
26981-static struct ata_port_operations k2_sata_ops = {
26982+static const struct ata_port_operations k2_sata_ops = {
26983 .inherits = &ata_bmdma_port_ops,
26984 .sff_tf_load = k2_sata_tf_load,
26985 .sff_tf_read = k2_sata_tf_read,
26986diff -urNp linux-2.6.32.48/drivers/ata/sata_sx4.c linux-2.6.32.48/drivers/ata/sata_sx4.c
26987--- linux-2.6.32.48/drivers/ata/sata_sx4.c 2011-11-08 19:02:43.000000000 -0500
26988+++ linux-2.6.32.48/drivers/ata/sata_sx4.c 2011-11-15 19:59:43.000000000 -0500
26989@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26990 };
26991
26992 /* TODO: inherit from base port_ops after converting to new EH */
26993-static struct ata_port_operations pdc_20621_ops = {
26994+static const struct ata_port_operations pdc_20621_ops = {
26995 .inherits = &ata_sff_port_ops,
26996
26997 .check_atapi_dma = pdc_check_atapi_dma,
26998diff -urNp linux-2.6.32.48/drivers/ata/sata_uli.c linux-2.6.32.48/drivers/ata/sata_uli.c
26999--- linux-2.6.32.48/drivers/ata/sata_uli.c 2011-11-08 19:02:43.000000000 -0500
27000+++ linux-2.6.32.48/drivers/ata/sata_uli.c 2011-11-15 19:59:43.000000000 -0500
27001@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
27002 ATA_BMDMA_SHT(DRV_NAME),
27003 };
27004
27005-static struct ata_port_operations uli_ops = {
27006+static const struct ata_port_operations uli_ops = {
27007 .inherits = &ata_bmdma_port_ops,
27008 .scr_read = uli_scr_read,
27009 .scr_write = uli_scr_write,
27010diff -urNp linux-2.6.32.48/drivers/ata/sata_via.c linux-2.6.32.48/drivers/ata/sata_via.c
27011--- linux-2.6.32.48/drivers/ata/sata_via.c 2011-11-08 19:02:43.000000000 -0500
27012+++ linux-2.6.32.48/drivers/ata/sata_via.c 2011-11-15 19:59:43.000000000 -0500
27013@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
27014 ATA_BMDMA_SHT(DRV_NAME),
27015 };
27016
27017-static struct ata_port_operations svia_base_ops = {
27018+static const struct ata_port_operations svia_base_ops = {
27019 .inherits = &ata_bmdma_port_ops,
27020 .sff_tf_load = svia_tf_load,
27021 };
27022
27023-static struct ata_port_operations vt6420_sata_ops = {
27024+static const struct ata_port_operations vt6420_sata_ops = {
27025 .inherits = &svia_base_ops,
27026 .freeze = svia_noop_freeze,
27027 .prereset = vt6420_prereset,
27028 .bmdma_start = vt6420_bmdma_start,
27029 };
27030
27031-static struct ata_port_operations vt6421_pata_ops = {
27032+static const struct ata_port_operations vt6421_pata_ops = {
27033 .inherits = &svia_base_ops,
27034 .cable_detect = vt6421_pata_cable_detect,
27035 .set_piomode = vt6421_set_pio_mode,
27036 .set_dmamode = vt6421_set_dma_mode,
27037 };
27038
27039-static struct ata_port_operations vt6421_sata_ops = {
27040+static const struct ata_port_operations vt6421_sata_ops = {
27041 .inherits = &svia_base_ops,
27042 .scr_read = svia_scr_read,
27043 .scr_write = svia_scr_write,
27044 };
27045
27046-static struct ata_port_operations vt8251_ops = {
27047+static const struct ata_port_operations vt8251_ops = {
27048 .inherits = &svia_base_ops,
27049 .hardreset = sata_std_hardreset,
27050 .scr_read = vt8251_scr_read,
27051diff -urNp linux-2.6.32.48/drivers/ata/sata_vsc.c linux-2.6.32.48/drivers/ata/sata_vsc.c
27052--- linux-2.6.32.48/drivers/ata/sata_vsc.c 2011-11-08 19:02:43.000000000 -0500
27053+++ linux-2.6.32.48/drivers/ata/sata_vsc.c 2011-11-15 19:59:43.000000000 -0500
27054@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
27055 };
27056
27057
27058-static struct ata_port_operations vsc_sata_ops = {
27059+static const struct ata_port_operations vsc_sata_ops = {
27060 .inherits = &ata_bmdma_port_ops,
27061 /* The IRQ handling is not quite standard SFF behaviour so we
27062 cannot use the default lost interrupt handler */
27063diff -urNp linux-2.6.32.48/drivers/atm/adummy.c linux-2.6.32.48/drivers/atm/adummy.c
27064--- linux-2.6.32.48/drivers/atm/adummy.c 2011-11-08 19:02:43.000000000 -0500
27065+++ linux-2.6.32.48/drivers/atm/adummy.c 2011-11-15 19:59:43.000000000 -0500
27066@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
27067 vcc->pop(vcc, skb);
27068 else
27069 dev_kfree_skb_any(skb);
27070- atomic_inc(&vcc->stats->tx);
27071+ atomic_inc_unchecked(&vcc->stats->tx);
27072
27073 return 0;
27074 }
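From adummy.c through the rest of drivers/atm below (ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar, solos-pci, zatm, plus the SONET counters in suni.c and uPD98402.c), the patch rewrites atomic_inc/atomic_add/atomic_read/atomic_set on the VCC and SONET statistics counters to the *_unchecked variants. Under PaX's REFCOUNT hardening, ordinary atomic_t operations detect and refuse overflow, which is what reference counts need but not what wrapping statistics counters need; atomic_unchecked_t keeps the old wrapping semantics for those. A rough userspace analogue of the split, using C11 atomics; the type and function names are made up, and saturation stands in for the kernel's overflow trap:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* "Checked" counter: refuses to move past INT_MAX (stand-in for the
     * PaX behaviour of trapping refcount overflow). */
    typedef struct { atomic_int v; } atomic_checked_t;

    /* "Unchecked" counter: plain wrapping counter, fine for statistics. */
    typedef struct { atomic_int v; } atomic_unchecked_t;

    static void atomic_inc_checked(atomic_checked_t *a)
    {
            int old = atomic_load(&a->v);

            /* retry until we either hit the cap or win the race */
            while (old != INT_MAX &&
                   !atomic_compare_exchange_weak(&a->v, &old, old + 1))
                    ;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *a)
    {
            atomic_fetch_add(&a->v, 1);     /* may wrap; nothing checks it */
    }

    int main(void)
    {
            atomic_checked_t   refs       = { 0 };
            atomic_unchecked_t rx_packets = { 0 };

            atomic_inc_checked(&refs);
            atomic_inc_unchecked(&rx_packets);
            printf("refs=%d rx=%d\n",
                   atomic_load(&refs.v), atomic_load(&rx_packets.v));
            return 0;
    }

The substitution keeps the hardened atomic_t semantics from tripping on counters such as vcc->stats->rx that legitimately count past any bound.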
27075diff -urNp linux-2.6.32.48/drivers/atm/ambassador.c linux-2.6.32.48/drivers/atm/ambassador.c
27076--- linux-2.6.32.48/drivers/atm/ambassador.c 2011-11-08 19:02:43.000000000 -0500
27077+++ linux-2.6.32.48/drivers/atm/ambassador.c 2011-11-15 19:59:43.000000000 -0500
27078@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
27079 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27080
27081 // VC layer stats
27082- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27083+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27084
27085 // free the descriptor
27086 kfree (tx_descr);
27087@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
27088 dump_skb ("<<<", vc, skb);
27089
27090 // VC layer stats
27091- atomic_inc(&atm_vcc->stats->rx);
27092+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27093 __net_timestamp(skb);
27094 // end of our responsibility
27095 atm_vcc->push (atm_vcc, skb);
27096@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
27097 } else {
27098 PRINTK (KERN_INFO, "dropped over-size frame");
27099 // should we count this?
27100- atomic_inc(&atm_vcc->stats->rx_drop);
27101+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27102 }
27103
27104 } else {
27105@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
27106 }
27107
27108 if (check_area (skb->data, skb->len)) {
27109- atomic_inc(&atm_vcc->stats->tx_err);
27110+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27111 return -ENOMEM; // ?
27112 }
27113
27114diff -urNp linux-2.6.32.48/drivers/atm/atmtcp.c linux-2.6.32.48/drivers/atm/atmtcp.c
27115--- linux-2.6.32.48/drivers/atm/atmtcp.c 2011-11-08 19:02:43.000000000 -0500
27116+++ linux-2.6.32.48/drivers/atm/atmtcp.c 2011-11-15 19:59:43.000000000 -0500
27117@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
27118 if (vcc->pop) vcc->pop(vcc,skb);
27119 else dev_kfree_skb(skb);
27120 if (dev_data) return 0;
27121- atomic_inc(&vcc->stats->tx_err);
27122+ atomic_inc_unchecked(&vcc->stats->tx_err);
27123 return -ENOLINK;
27124 }
27125 size = skb->len+sizeof(struct atmtcp_hdr);
27126@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
27127 if (!new_skb) {
27128 if (vcc->pop) vcc->pop(vcc,skb);
27129 else dev_kfree_skb(skb);
27130- atomic_inc(&vcc->stats->tx_err);
27131+ atomic_inc_unchecked(&vcc->stats->tx_err);
27132 return -ENOBUFS;
27133 }
27134 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27135@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
27136 if (vcc->pop) vcc->pop(vcc,skb);
27137 else dev_kfree_skb(skb);
27138 out_vcc->push(out_vcc,new_skb);
27139- atomic_inc(&vcc->stats->tx);
27140- atomic_inc(&out_vcc->stats->rx);
27141+ atomic_inc_unchecked(&vcc->stats->tx);
27142+ atomic_inc_unchecked(&out_vcc->stats->rx);
27143 return 0;
27144 }
27145
27146@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
27147 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27148 read_unlock(&vcc_sklist_lock);
27149 if (!out_vcc) {
27150- atomic_inc(&vcc->stats->tx_err);
27151+ atomic_inc_unchecked(&vcc->stats->tx_err);
27152 goto done;
27153 }
27154 skb_pull(skb,sizeof(struct atmtcp_hdr));
27155@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
27156 __net_timestamp(new_skb);
27157 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27158 out_vcc->push(out_vcc,new_skb);
27159- atomic_inc(&vcc->stats->tx);
27160- atomic_inc(&out_vcc->stats->rx);
27161+ atomic_inc_unchecked(&vcc->stats->tx);
27162+ atomic_inc_unchecked(&out_vcc->stats->rx);
27163 done:
27164 if (vcc->pop) vcc->pop(vcc,skb);
27165 else dev_kfree_skb(skb);
27166diff -urNp linux-2.6.32.48/drivers/atm/eni.c linux-2.6.32.48/drivers/atm/eni.c
27167--- linux-2.6.32.48/drivers/atm/eni.c 2011-11-08 19:02:43.000000000 -0500
27168+++ linux-2.6.32.48/drivers/atm/eni.c 2011-11-15 19:59:43.000000000 -0500
27169@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27170 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27171 vcc->dev->number);
27172 length = 0;
27173- atomic_inc(&vcc->stats->rx_err);
27174+ atomic_inc_unchecked(&vcc->stats->rx_err);
27175 }
27176 else {
27177 length = ATM_CELL_SIZE-1; /* no HEC */
27178@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27179 size);
27180 }
27181 eff = length = 0;
27182- atomic_inc(&vcc->stats->rx_err);
27183+ atomic_inc_unchecked(&vcc->stats->rx_err);
27184 }
27185 else {
27186 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27187@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27188 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27189 vcc->dev->number,vcc->vci,length,size << 2,descr);
27190 length = eff = 0;
27191- atomic_inc(&vcc->stats->rx_err);
27192+ atomic_inc_unchecked(&vcc->stats->rx_err);
27193 }
27194 }
27195 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27196@@ -770,7 +770,7 @@ rx_dequeued++;
27197 vcc->push(vcc,skb);
27198 pushed++;
27199 }
27200- atomic_inc(&vcc->stats->rx);
27201+ atomic_inc_unchecked(&vcc->stats->rx);
27202 }
27203 wake_up(&eni_dev->rx_wait);
27204 }
27205@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
27206 PCI_DMA_TODEVICE);
27207 if (vcc->pop) vcc->pop(vcc,skb);
27208 else dev_kfree_skb_irq(skb);
27209- atomic_inc(&vcc->stats->tx);
27210+ atomic_inc_unchecked(&vcc->stats->tx);
27211 wake_up(&eni_dev->tx_wait);
27212 dma_complete++;
27213 }
27214diff -urNp linux-2.6.32.48/drivers/atm/firestream.c linux-2.6.32.48/drivers/atm/firestream.c
27215--- linux-2.6.32.48/drivers/atm/firestream.c 2011-11-08 19:02:43.000000000 -0500
27216+++ linux-2.6.32.48/drivers/atm/firestream.c 2011-11-15 19:59:43.000000000 -0500
27217@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
27218 }
27219 }
27220
27221- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27222+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27223
27224 fs_dprintk (FS_DEBUG_TXMEM, "i");
27225 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27226@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
27227 #endif
27228 skb_put (skb, qe->p1 & 0xffff);
27229 ATM_SKB(skb)->vcc = atm_vcc;
27230- atomic_inc(&atm_vcc->stats->rx);
27231+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27232 __net_timestamp(skb);
27233 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27234 atm_vcc->push (atm_vcc, skb);
27235@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
27236 kfree (pe);
27237 }
27238 if (atm_vcc)
27239- atomic_inc(&atm_vcc->stats->rx_drop);
27240+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27241 break;
27242 case 0x1f: /* Reassembly abort: no buffers. */
27243 /* Silently increment error counter. */
27244 if (atm_vcc)
27245- atomic_inc(&atm_vcc->stats->rx_drop);
27246+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27247 break;
27248 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27249 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27250diff -urNp linux-2.6.32.48/drivers/atm/fore200e.c linux-2.6.32.48/drivers/atm/fore200e.c
27251--- linux-2.6.32.48/drivers/atm/fore200e.c 2011-11-08 19:02:43.000000000 -0500
27252+++ linux-2.6.32.48/drivers/atm/fore200e.c 2011-11-15 19:59:43.000000000 -0500
27253@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
27254 #endif
27255 /* check error condition */
27256 if (*entry->status & STATUS_ERROR)
27257- atomic_inc(&vcc->stats->tx_err);
27258+ atomic_inc_unchecked(&vcc->stats->tx_err);
27259 else
27260- atomic_inc(&vcc->stats->tx);
27261+ atomic_inc_unchecked(&vcc->stats->tx);
27262 }
27263 }
27264
27265@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
27266 if (skb == NULL) {
27267 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27268
27269- atomic_inc(&vcc->stats->rx_drop);
27270+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27271 return -ENOMEM;
27272 }
27273
27274@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
27275
27276 dev_kfree_skb_any(skb);
27277
27278- atomic_inc(&vcc->stats->rx_drop);
27279+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27280 return -ENOMEM;
27281 }
27282
27283 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27284
27285 vcc->push(vcc, skb);
27286- atomic_inc(&vcc->stats->rx);
27287+ atomic_inc_unchecked(&vcc->stats->rx);
27288
27289 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27290
27291@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
27292 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27293 fore200e->atm_dev->number,
27294 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27295- atomic_inc(&vcc->stats->rx_err);
27296+ atomic_inc_unchecked(&vcc->stats->rx_err);
27297 }
27298 }
27299
27300@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
27301 goto retry_here;
27302 }
27303
27304- atomic_inc(&vcc->stats->tx_err);
27305+ atomic_inc_unchecked(&vcc->stats->tx_err);
27306
27307 fore200e->tx_sat++;
27308 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27309diff -urNp linux-2.6.32.48/drivers/atm/he.c linux-2.6.32.48/drivers/atm/he.c
27310--- linux-2.6.32.48/drivers/atm/he.c 2011-11-08 19:02:43.000000000 -0500
27311+++ linux-2.6.32.48/drivers/atm/he.c 2011-11-15 19:59:43.000000000 -0500
27312@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
27313
27314 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27315 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27316- atomic_inc(&vcc->stats->rx_drop);
27317+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27318 goto return_host_buffers;
27319 }
27320
27321@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
27322 RBRQ_LEN_ERR(he_dev->rbrq_head)
27323 ? "LEN_ERR" : "",
27324 vcc->vpi, vcc->vci);
27325- atomic_inc(&vcc->stats->rx_err);
27326+ atomic_inc_unchecked(&vcc->stats->rx_err);
27327 goto return_host_buffers;
27328 }
27329
27330@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
27331 vcc->push(vcc, skb);
27332 spin_lock(&he_dev->global_lock);
27333
27334- atomic_inc(&vcc->stats->rx);
27335+ atomic_inc_unchecked(&vcc->stats->rx);
27336
27337 return_host_buffers:
27338 ++pdus_assembled;
27339@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
27340 tpd->vcc->pop(tpd->vcc, tpd->skb);
27341 else
27342 dev_kfree_skb_any(tpd->skb);
27343- atomic_inc(&tpd->vcc->stats->tx_err);
27344+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27345 }
27346 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27347 return;
27348@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
27349 vcc->pop(vcc, skb);
27350 else
27351 dev_kfree_skb_any(skb);
27352- atomic_inc(&vcc->stats->tx_err);
27353+ atomic_inc_unchecked(&vcc->stats->tx_err);
27354 return -EINVAL;
27355 }
27356
27357@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
27358 vcc->pop(vcc, skb);
27359 else
27360 dev_kfree_skb_any(skb);
27361- atomic_inc(&vcc->stats->tx_err);
27362+ atomic_inc_unchecked(&vcc->stats->tx_err);
27363 return -EINVAL;
27364 }
27365 #endif
27366@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
27367 vcc->pop(vcc, skb);
27368 else
27369 dev_kfree_skb_any(skb);
27370- atomic_inc(&vcc->stats->tx_err);
27371+ atomic_inc_unchecked(&vcc->stats->tx_err);
27372 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27373 return -ENOMEM;
27374 }
27375@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
27376 vcc->pop(vcc, skb);
27377 else
27378 dev_kfree_skb_any(skb);
27379- atomic_inc(&vcc->stats->tx_err);
27380+ atomic_inc_unchecked(&vcc->stats->tx_err);
27381 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27382 return -ENOMEM;
27383 }
27384@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
27385 __enqueue_tpd(he_dev, tpd, cid);
27386 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27387
27388- atomic_inc(&vcc->stats->tx);
27389+ atomic_inc_unchecked(&vcc->stats->tx);
27390
27391 return 0;
27392 }
27393diff -urNp linux-2.6.32.48/drivers/atm/horizon.c linux-2.6.32.48/drivers/atm/horizon.c
27394--- linux-2.6.32.48/drivers/atm/horizon.c 2011-11-08 19:02:43.000000000 -0500
27395+++ linux-2.6.32.48/drivers/atm/horizon.c 2011-11-15 19:59:43.000000000 -0500
27396@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
27397 {
27398 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27399 // VC layer stats
27400- atomic_inc(&vcc->stats->rx);
27401+ atomic_inc_unchecked(&vcc->stats->rx);
27402 __net_timestamp(skb);
27403 // end of our responsibility
27404 vcc->push (vcc, skb);
27405@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
27406 dev->tx_iovec = NULL;
27407
27408 // VC layer stats
27409- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27410+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27411
27412 // free the skb
27413 hrz_kfree_skb (skb);
27414diff -urNp linux-2.6.32.48/drivers/atm/idt77252.c linux-2.6.32.48/drivers/atm/idt77252.c
27415--- linux-2.6.32.48/drivers/atm/idt77252.c 2011-11-08 19:02:43.000000000 -0500
27416+++ linux-2.6.32.48/drivers/atm/idt77252.c 2011-11-15 19:59:43.000000000 -0500
27417@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
27418 else
27419 dev_kfree_skb(skb);
27420
27421- atomic_inc(&vcc->stats->tx);
27422+ atomic_inc_unchecked(&vcc->stats->tx);
27423 }
27424
27425 atomic_dec(&scq->used);
27426@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
27427 if ((sb = dev_alloc_skb(64)) == NULL) {
27428 printk("%s: Can't allocate buffers for aal0.\n",
27429 card->name);
27430- atomic_add(i, &vcc->stats->rx_drop);
27431+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
27432 break;
27433 }
27434 if (!atm_charge(vcc, sb->truesize)) {
27435 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27436 card->name);
27437- atomic_add(i - 1, &vcc->stats->rx_drop);
27438+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27439 dev_kfree_skb(sb);
27440 break;
27441 }
27442@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
27443 ATM_SKB(sb)->vcc = vcc;
27444 __net_timestamp(sb);
27445 vcc->push(vcc, sb);
27446- atomic_inc(&vcc->stats->rx);
27447+ atomic_inc_unchecked(&vcc->stats->rx);
27448
27449 cell += ATM_CELL_PAYLOAD;
27450 }
27451@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
27452 "(CDC: %08x)\n",
27453 card->name, len, rpp->len, readl(SAR_REG_CDC));
27454 recycle_rx_pool_skb(card, rpp);
27455- atomic_inc(&vcc->stats->rx_err);
27456+ atomic_inc_unchecked(&vcc->stats->rx_err);
27457 return;
27458 }
27459 if (stat & SAR_RSQE_CRC) {
27460 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27461 recycle_rx_pool_skb(card, rpp);
27462- atomic_inc(&vcc->stats->rx_err);
27463+ atomic_inc_unchecked(&vcc->stats->rx_err);
27464 return;
27465 }
27466 if (skb_queue_len(&rpp->queue) > 1) {
27467@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
27468 RXPRINTK("%s: Can't alloc RX skb.\n",
27469 card->name);
27470 recycle_rx_pool_skb(card, rpp);
27471- atomic_inc(&vcc->stats->rx_err);
27472+ atomic_inc_unchecked(&vcc->stats->rx_err);
27473 return;
27474 }
27475 if (!atm_charge(vcc, skb->truesize)) {
27476@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
27477 __net_timestamp(skb);
27478
27479 vcc->push(vcc, skb);
27480- atomic_inc(&vcc->stats->rx);
27481+ atomic_inc_unchecked(&vcc->stats->rx);
27482
27483 return;
27484 }
27485@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
27486 __net_timestamp(skb);
27487
27488 vcc->push(vcc, skb);
27489- atomic_inc(&vcc->stats->rx);
27490+ atomic_inc_unchecked(&vcc->stats->rx);
27491
27492 if (skb->truesize > SAR_FB_SIZE_3)
27493 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27494@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
27495 if (vcc->qos.aal != ATM_AAL0) {
27496 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27497 card->name, vpi, vci);
27498- atomic_inc(&vcc->stats->rx_drop);
27499+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27500 goto drop;
27501 }
27502
27503 if ((sb = dev_alloc_skb(64)) == NULL) {
27504 printk("%s: Can't allocate buffers for AAL0.\n",
27505 card->name);
27506- atomic_inc(&vcc->stats->rx_err);
27507+ atomic_inc_unchecked(&vcc->stats->rx_err);
27508 goto drop;
27509 }
27510
27511@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
27512 ATM_SKB(sb)->vcc = vcc;
27513 __net_timestamp(sb);
27514 vcc->push(vcc, sb);
27515- atomic_inc(&vcc->stats->rx);
27516+ atomic_inc_unchecked(&vcc->stats->rx);
27517
27518 drop:
27519 skb_pull(queue, 64);
27520@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
27521
27522 if (vc == NULL) {
27523 printk("%s: NULL connection in send().\n", card->name);
27524- atomic_inc(&vcc->stats->tx_err);
27525+ atomic_inc_unchecked(&vcc->stats->tx_err);
27526 dev_kfree_skb(skb);
27527 return -EINVAL;
27528 }
27529 if (!test_bit(VCF_TX, &vc->flags)) {
27530 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27531- atomic_inc(&vcc->stats->tx_err);
27532+ atomic_inc_unchecked(&vcc->stats->tx_err);
27533 dev_kfree_skb(skb);
27534 return -EINVAL;
27535 }
27536@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
27537 break;
27538 default:
27539 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27540- atomic_inc(&vcc->stats->tx_err);
27541+ atomic_inc_unchecked(&vcc->stats->tx_err);
27542 dev_kfree_skb(skb);
27543 return -EINVAL;
27544 }
27545
27546 if (skb_shinfo(skb)->nr_frags != 0) {
27547 printk("%s: No scatter-gather yet.\n", card->name);
27548- atomic_inc(&vcc->stats->tx_err);
27549+ atomic_inc_unchecked(&vcc->stats->tx_err);
27550 dev_kfree_skb(skb);
27551 return -EINVAL;
27552 }
27553@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
27554
27555 err = queue_skb(card, vc, skb, oam);
27556 if (err) {
27557- atomic_inc(&vcc->stats->tx_err);
27558+ atomic_inc_unchecked(&vcc->stats->tx_err);
27559 dev_kfree_skb(skb);
27560 return err;
27561 }
27562@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
27563 skb = dev_alloc_skb(64);
27564 if (!skb) {
27565 printk("%s: Out of memory in send_oam().\n", card->name);
27566- atomic_inc(&vcc->stats->tx_err);
27567+ atomic_inc_unchecked(&vcc->stats->tx_err);
27568 return -ENOMEM;
27569 }
27570 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27571diff -urNp linux-2.6.32.48/drivers/atm/iphase.c linux-2.6.32.48/drivers/atm/iphase.c
27572--- linux-2.6.32.48/drivers/atm/iphase.c 2011-11-08 19:02:43.000000000 -0500
27573+++ linux-2.6.32.48/drivers/atm/iphase.c 2011-11-15 19:59:43.000000000 -0500
27574@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
27575 status = (u_short) (buf_desc_ptr->desc_mode);
27576 if (status & (RX_CER | RX_PTE | RX_OFL))
27577 {
27578- atomic_inc(&vcc->stats->rx_err);
27579+ atomic_inc_unchecked(&vcc->stats->rx_err);
27580 IF_ERR(printk("IA: bad packet, dropping it");)
27581 if (status & RX_CER) {
27582 IF_ERR(printk(" cause: packet CRC error\n");)
27583@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27584 len = dma_addr - buf_addr;
27585 if (len > iadev->rx_buf_sz) {
27586 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27587- atomic_inc(&vcc->stats->rx_err);
27588+ atomic_inc_unchecked(&vcc->stats->rx_err);
27589 goto out_free_desc;
27590 }
27591
27592@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
27593 ia_vcc = INPH_IA_VCC(vcc);
27594 if (ia_vcc == NULL)
27595 {
27596- atomic_inc(&vcc->stats->rx_err);
27597+ atomic_inc_unchecked(&vcc->stats->rx_err);
27598 dev_kfree_skb_any(skb);
27599 atm_return(vcc, atm_guess_pdu2truesize(len));
27600 goto INCR_DLE;
27601@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
27602 if ((length > iadev->rx_buf_sz) || (length >
27603 (skb->len - sizeof(struct cpcs_trailer))))
27604 {
27605- atomic_inc(&vcc->stats->rx_err);
27606+ atomic_inc_unchecked(&vcc->stats->rx_err);
27607 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27608 length, skb->len);)
27609 dev_kfree_skb_any(skb);
27610@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
27611
27612 IF_RX(printk("rx_dle_intr: skb push");)
27613 vcc->push(vcc,skb);
27614- atomic_inc(&vcc->stats->rx);
27615+ atomic_inc_unchecked(&vcc->stats->rx);
27616 iadev->rx_pkt_cnt++;
27617 }
27618 INCR_DLE:
27619@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
27620 {
27621 struct k_sonet_stats *stats;
27622 stats = &PRIV(_ia_dev[board])->sonet_stats;
27623- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27624- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27625- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27626- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27627- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27628- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27629- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27630- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27631- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27632+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27633+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27634+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27635+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27636+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27637+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27638+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27639+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27640+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27641 }
27642 ia_cmds.status = 0;
27643 break;
27644@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
27645 if ((desc == 0) || (desc > iadev->num_tx_desc))
27646 {
27647 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27648- atomic_inc(&vcc->stats->tx);
27649+ atomic_inc_unchecked(&vcc->stats->tx);
27650 if (vcc->pop)
27651 vcc->pop(vcc, skb);
27652 else
27653@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
27654 ATM_DESC(skb) = vcc->vci;
27655 skb_queue_tail(&iadev->tx_dma_q, skb);
27656
27657- atomic_inc(&vcc->stats->tx);
27658+ atomic_inc_unchecked(&vcc->stats->tx);
27659 iadev->tx_pkt_cnt++;
27660 /* Increment transaction counter */
27661 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27662
27663 #if 0
27664 /* add flow control logic */
27665- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27666+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27667 if (iavcc->vc_desc_cnt > 10) {
27668 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27669 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27670diff -urNp linux-2.6.32.48/drivers/atm/lanai.c linux-2.6.32.48/drivers/atm/lanai.c
27671--- linux-2.6.32.48/drivers/atm/lanai.c 2011-11-08 19:02:43.000000000 -0500
27672+++ linux-2.6.32.48/drivers/atm/lanai.c 2011-11-15 19:59:43.000000000 -0500
27673@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
27674 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27675 lanai_endtx(lanai, lvcc);
27676 lanai_free_skb(lvcc->tx.atmvcc, skb);
27677- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27678+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27679 }
27680
27681 /* Try to fill the buffer - don't call unless there is backlog */
27682@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
27683 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27684 __net_timestamp(skb);
27685 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27686- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27687+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27688 out:
27689 lvcc->rx.buf.ptr = end;
27690 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27691@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
27692 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27693 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27694 lanai->stats.service_rxnotaal5++;
27695- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27696+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27697 return 0;
27698 }
27699 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27700@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
27701 int bytes;
27702 read_unlock(&vcc_sklist_lock);
27703 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27704- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27705+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27706 lvcc->stats.x.aal5.service_trash++;
27707 bytes = (SERVICE_GET_END(s) * 16) -
27708 (((unsigned long) lvcc->rx.buf.ptr) -
27709@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
27710 }
27711 if (s & SERVICE_STREAM) {
27712 read_unlock(&vcc_sklist_lock);
27713- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27714+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27715 lvcc->stats.x.aal5.service_stream++;
27716 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27717 "PDU on VCI %d!\n", lanai->number, vci);
27718@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
27719 return 0;
27720 }
27721 DPRINTK("got rx crc error on vci %d\n", vci);
27722- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27723+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27724 lvcc->stats.x.aal5.service_rxcrc++;
27725 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27726 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27727diff -urNp linux-2.6.32.48/drivers/atm/nicstar.c linux-2.6.32.48/drivers/atm/nicstar.c
27728--- linux-2.6.32.48/drivers/atm/nicstar.c 2011-11-08 19:02:43.000000000 -0500
27729+++ linux-2.6.32.48/drivers/atm/nicstar.c 2011-11-15 19:59:43.000000000 -0500
27730@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
27731 if ((vc = (vc_map *) vcc->dev_data) == NULL)
27732 {
27733 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
27734- atomic_inc(&vcc->stats->tx_err);
27735+ atomic_inc_unchecked(&vcc->stats->tx_err);
27736 dev_kfree_skb_any(skb);
27737 return -EINVAL;
27738 }
27739@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
27740 if (!vc->tx)
27741 {
27742 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
27743- atomic_inc(&vcc->stats->tx_err);
27744+ atomic_inc_unchecked(&vcc->stats->tx_err);
27745 dev_kfree_skb_any(skb);
27746 return -EINVAL;
27747 }
27748@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
27749 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
27750 {
27751 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
27752- atomic_inc(&vcc->stats->tx_err);
27753+ atomic_inc_unchecked(&vcc->stats->tx_err);
27754 dev_kfree_skb_any(skb);
27755 return -EINVAL;
27756 }
27757@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
27758 if (skb_shinfo(skb)->nr_frags != 0)
27759 {
27760 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27761- atomic_inc(&vcc->stats->tx_err);
27762+ atomic_inc_unchecked(&vcc->stats->tx_err);
27763 dev_kfree_skb_any(skb);
27764 return -EINVAL;
27765 }
27766@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
27767
27768 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
27769 {
27770- atomic_inc(&vcc->stats->tx_err);
27771+ atomic_inc_unchecked(&vcc->stats->tx_err);
27772 dev_kfree_skb_any(skb);
27773 return -EIO;
27774 }
27775- atomic_inc(&vcc->stats->tx);
27776+ atomic_inc_unchecked(&vcc->stats->tx);
27777
27778 return 0;
27779 }
27780@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
27781 {
27782 printk("nicstar%d: Can't allocate buffers for aal0.\n",
27783 card->index);
27784- atomic_add(i,&vcc->stats->rx_drop);
27785+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
27786 break;
27787 }
27788 if (!atm_charge(vcc, sb->truesize))
27789 {
27790 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
27791 card->index);
27792- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
27793+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
27794 dev_kfree_skb_any(sb);
27795 break;
27796 }
27797@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
27798 ATM_SKB(sb)->vcc = vcc;
27799 __net_timestamp(sb);
27800 vcc->push(vcc, sb);
27801- atomic_inc(&vcc->stats->rx);
27802+ atomic_inc_unchecked(&vcc->stats->rx);
27803 cell += ATM_CELL_PAYLOAD;
27804 }
27805
27806@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
27807 if (iovb == NULL)
27808 {
27809 printk("nicstar%d: Out of iovec buffers.\n", card->index);
27810- atomic_inc(&vcc->stats->rx_drop);
27811+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27812 recycle_rx_buf(card, skb);
27813 return;
27814 }
27815@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
27816 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
27817 {
27818 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27819- atomic_inc(&vcc->stats->rx_err);
27820+ atomic_inc_unchecked(&vcc->stats->rx_err);
27821 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
27822 NS_SKB(iovb)->iovcnt = 0;
27823 iovb->len = 0;
27824@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
27825 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
27826 card->index);
27827 which_list(card, skb);
27828- atomic_inc(&vcc->stats->rx_err);
27829+ atomic_inc_unchecked(&vcc->stats->rx_err);
27830 recycle_rx_buf(card, skb);
27831 vc->rx_iov = NULL;
27832 recycle_iov_buf(card, iovb);
27833@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
27834 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
27835 card->index);
27836 which_list(card, skb);
27837- atomic_inc(&vcc->stats->rx_err);
27838+ atomic_inc_unchecked(&vcc->stats->rx_err);
27839 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27840 NS_SKB(iovb)->iovcnt);
27841 vc->rx_iov = NULL;
27842@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
27843 printk(" - PDU size mismatch.\n");
27844 else
27845 printk(".\n");
27846- atomic_inc(&vcc->stats->rx_err);
27847+ atomic_inc_unchecked(&vcc->stats->rx_err);
27848 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27849 NS_SKB(iovb)->iovcnt);
27850 vc->rx_iov = NULL;
27851@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
27852 if (!atm_charge(vcc, skb->truesize))
27853 {
27854 push_rxbufs(card, skb);
27855- atomic_inc(&vcc->stats->rx_drop);
27856+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27857 }
27858 else
27859 {
27860@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
27861 ATM_SKB(skb)->vcc = vcc;
27862 __net_timestamp(skb);
27863 vcc->push(vcc, skb);
27864- atomic_inc(&vcc->stats->rx);
27865+ atomic_inc_unchecked(&vcc->stats->rx);
27866 }
27867 }
27868 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
27869@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
27870 if (!atm_charge(vcc, sb->truesize))
27871 {
27872 push_rxbufs(card, sb);
27873- atomic_inc(&vcc->stats->rx_drop);
27874+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27875 }
27876 else
27877 {
27878@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27879 ATM_SKB(sb)->vcc = vcc;
27880 __net_timestamp(sb);
27881 vcc->push(vcc, sb);
27882- atomic_inc(&vcc->stats->rx);
27883+ atomic_inc_unchecked(&vcc->stats->rx);
27884 }
27885
27886 push_rxbufs(card, skb);
27887@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27888 if (!atm_charge(vcc, skb->truesize))
27889 {
27890 push_rxbufs(card, skb);
27891- atomic_inc(&vcc->stats->rx_drop);
27892+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27893 }
27894 else
27895 {
27896@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27897 ATM_SKB(skb)->vcc = vcc;
27898 __net_timestamp(skb);
27899 vcc->push(vcc, skb);
27900- atomic_inc(&vcc->stats->rx);
27901+ atomic_inc_unchecked(&vcc->stats->rx);
27902 }
27903
27904 push_rxbufs(card, sb);
27905@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27906 if (hb == NULL)
27907 {
27908 printk("nicstar%d: Out of huge buffers.\n", card->index);
27909- atomic_inc(&vcc->stats->rx_drop);
27910+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27911 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27912 NS_SKB(iovb)->iovcnt);
27913 vc->rx_iov = NULL;
27914@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27915 }
27916 else
27917 dev_kfree_skb_any(hb);
27918- atomic_inc(&vcc->stats->rx_drop);
27919+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27920 }
27921 else
27922 {
27923@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27924 #endif /* NS_USE_DESTRUCTORS */
27925 __net_timestamp(hb);
27926 vcc->push(vcc, hb);
27927- atomic_inc(&vcc->stats->rx);
27928+ atomic_inc_unchecked(&vcc->stats->rx);
27929 }
27930 }
27931
27932diff -urNp linux-2.6.32.48/drivers/atm/solos-pci.c linux-2.6.32.48/drivers/atm/solos-pci.c
27933--- linux-2.6.32.48/drivers/atm/solos-pci.c 2011-11-08 19:02:43.000000000 -0500
27934+++ linux-2.6.32.48/drivers/atm/solos-pci.c 2011-11-15 19:59:43.000000000 -0500
27935@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27936 }
27937 atm_charge(vcc, skb->truesize);
27938 vcc->push(vcc, skb);
27939- atomic_inc(&vcc->stats->rx);
27940+ atomic_inc_unchecked(&vcc->stats->rx);
27941 break;
27942
27943 case PKT_STATUS:
27944@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27945 char msg[500];
27946 char item[10];
27947
27948+ pax_track_stack();
27949+
27950 len = buf->len;
27951 for (i = 0; i < len; i++){
27952 if(i % 8 == 0)
27953@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27954 vcc = SKB_CB(oldskb)->vcc;
27955
27956 if (vcc) {
27957- atomic_inc(&vcc->stats->tx);
27958+ atomic_inc_unchecked(&vcc->stats->tx);
27959 solos_pop(vcc, oldskb);
27960 } else
27961 dev_kfree_skb_irq(oldskb);
27962diff -urNp linux-2.6.32.48/drivers/atm/suni.c linux-2.6.32.48/drivers/atm/suni.c
27963--- linux-2.6.32.48/drivers/atm/suni.c 2011-11-08 19:02:43.000000000 -0500
27964+++ linux-2.6.32.48/drivers/atm/suni.c 2011-11-15 19:59:43.000000000 -0500
27965@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27966
27967
27968 #define ADD_LIMITED(s,v) \
27969- atomic_add((v),&stats->s); \
27970- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27971+ atomic_add_unchecked((v),&stats->s); \
27972+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27973
27974
27975 static void suni_hz(unsigned long from_timer)
27976diff -urNp linux-2.6.32.48/drivers/atm/uPD98402.c linux-2.6.32.48/drivers/atm/uPD98402.c
27977--- linux-2.6.32.48/drivers/atm/uPD98402.c 2011-11-08 19:02:43.000000000 -0500
27978+++ linux-2.6.32.48/drivers/atm/uPD98402.c 2011-11-15 19:59:43.000000000 -0500
27979@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27980 struct sonet_stats tmp;
27981 int error = 0;
27982
27983- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27984+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27985 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27986 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27987 if (zero && !error) {
27988@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27989
27990
27991 #define ADD_LIMITED(s,v) \
27992- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27993- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27994- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27995+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27996+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27997+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27998
27999
28000 static void stat_event(struct atm_dev *dev)
28001@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
28002 if (reason & uPD98402_INT_PFM) stat_event(dev);
28003 if (reason & uPD98402_INT_PCO) {
28004 (void) GET(PCOCR); /* clear interrupt cause */
28005- atomic_add(GET(HECCT),
28006+ atomic_add_unchecked(GET(HECCT),
28007 &PRIV(dev)->sonet_stats.uncorr_hcs);
28008 }
28009 if ((reason & uPD98402_INT_RFO) &&
28010@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
28011 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28012 uPD98402_INT_LOS),PIMR); /* enable them */
28013 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28014- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28015- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28016- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28017+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28018+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28019+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28020 return 0;
28021 }
28022
28023diff -urNp linux-2.6.32.48/drivers/atm/zatm.c linux-2.6.32.48/drivers/atm/zatm.c
28024--- linux-2.6.32.48/drivers/atm/zatm.c 2011-11-08 19:02:43.000000000 -0500
28025+++ linux-2.6.32.48/drivers/atm/zatm.c 2011-11-15 19:59:43.000000000 -0500
28026@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
28027 }
28028 if (!size) {
28029 dev_kfree_skb_irq(skb);
28030- if (vcc) atomic_inc(&vcc->stats->rx_err);
28031+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28032 continue;
28033 }
28034 if (!atm_charge(vcc,skb->truesize)) {
28035@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
28036 skb->len = size;
28037 ATM_SKB(skb)->vcc = vcc;
28038 vcc->push(vcc,skb);
28039- atomic_inc(&vcc->stats->rx);
28040+ atomic_inc_unchecked(&vcc->stats->rx);
28041 }
28042 zout(pos & 0xffff,MTA(mbx));
28043 #if 0 /* probably a stupid idea */
28044@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
28045 skb_queue_head(&zatm_vcc->backlog,skb);
28046 break;
28047 }
28048- atomic_inc(&vcc->stats->tx);
28049+ atomic_inc_unchecked(&vcc->stats->tx);
28050 wake_up(&zatm_vcc->tx_wait);
28051 }
28052
28053diff -urNp linux-2.6.32.48/drivers/base/bus.c linux-2.6.32.48/drivers/base/bus.c
28054--- linux-2.6.32.48/drivers/base/bus.c 2011-11-08 19:02:43.000000000 -0500
28055+++ linux-2.6.32.48/drivers/base/bus.c 2011-11-15 19:59:43.000000000 -0500
28056@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
28057 return ret;
28058 }
28059
28060-static struct sysfs_ops driver_sysfs_ops = {
28061+static const struct sysfs_ops driver_sysfs_ops = {
28062 .show = drv_attr_show,
28063 .store = drv_attr_store,
28064 };
28065@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
28066 return ret;
28067 }
28068
28069-static struct sysfs_ops bus_sysfs_ops = {
28070+static const struct sysfs_ops bus_sysfs_ops = {
28071 .show = bus_attr_show,
28072 .store = bus_attr_store,
28073 };
28074@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
28075 return 0;
28076 }
28077
28078-static struct kset_uevent_ops bus_uevent_ops = {
28079+static const struct kset_uevent_ops bus_uevent_ops = {
28080 .filter = bus_uevent_filter,
28081 };
28082
28083diff -urNp linux-2.6.32.48/drivers/base/class.c linux-2.6.32.48/drivers/base/class.c
28084--- linux-2.6.32.48/drivers/base/class.c 2011-11-08 19:02:43.000000000 -0500
28085+++ linux-2.6.32.48/drivers/base/class.c 2011-11-15 19:59:43.000000000 -0500
28086@@ -63,7 +63,7 @@ static void class_release(struct kobject
28087 kfree(cp);
28088 }
28089
28090-static struct sysfs_ops class_sysfs_ops = {
28091+static const struct sysfs_ops class_sysfs_ops = {
28092 .show = class_attr_show,
28093 .store = class_attr_store,
28094 };
28095diff -urNp linux-2.6.32.48/drivers/base/core.c linux-2.6.32.48/drivers/base/core.c
28096--- linux-2.6.32.48/drivers/base/core.c 2011-11-08 19:02:43.000000000 -0500
28097+++ linux-2.6.32.48/drivers/base/core.c 2011-11-15 19:59:43.000000000 -0500
28098@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
28099 return ret;
28100 }
28101
28102-static struct sysfs_ops dev_sysfs_ops = {
28103+static const struct sysfs_ops dev_sysfs_ops = {
28104 .show = dev_attr_show,
28105 .store = dev_attr_store,
28106 };
28107@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
28108 return retval;
28109 }
28110
28111-static struct kset_uevent_ops device_uevent_ops = {
28112+static const struct kset_uevent_ops device_uevent_ops = {
28113 .filter = dev_uevent_filter,
28114 .name = dev_uevent_name,
28115 .uevent = dev_uevent,
28116diff -urNp linux-2.6.32.48/drivers/base/memory.c linux-2.6.32.48/drivers/base/memory.c
28117--- linux-2.6.32.48/drivers/base/memory.c 2011-11-08 19:02:43.000000000 -0500
28118+++ linux-2.6.32.48/drivers/base/memory.c 2011-11-15 19:59:43.000000000 -0500
28119@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
28120 return retval;
28121 }
28122
28123-static struct kset_uevent_ops memory_uevent_ops = {
28124+static const struct kset_uevent_ops memory_uevent_ops = {
28125 .name = memory_uevent_name,
28126 .uevent = memory_uevent,
28127 };
28128diff -urNp linux-2.6.32.48/drivers/base/sys.c linux-2.6.32.48/drivers/base/sys.c
28129--- linux-2.6.32.48/drivers/base/sys.c 2011-11-08 19:02:43.000000000 -0500
28130+++ linux-2.6.32.48/drivers/base/sys.c 2011-11-15 19:59:43.000000000 -0500
28131@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
28132 return -EIO;
28133 }
28134
28135-static struct sysfs_ops sysfs_ops = {
28136+static const struct sysfs_ops sysfs_ops = {
28137 .show = sysdev_show,
28138 .store = sysdev_store,
28139 };
28140@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
28141 return -EIO;
28142 }
28143
28144-static struct sysfs_ops sysfs_class_ops = {
28145+static const struct sysfs_ops sysfs_class_ops = {
28146 .show = sysdev_class_show,
28147 .store = sysdev_class_store,
28148 };
28149diff -urNp linux-2.6.32.48/drivers/block/cciss.c linux-2.6.32.48/drivers/block/cciss.c
28150--- linux-2.6.32.48/drivers/block/cciss.c 2011-11-08 19:02:43.000000000 -0500
28151+++ linux-2.6.32.48/drivers/block/cciss.c 2011-11-15 19:59:43.000000000 -0500
28152@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
28153 int err;
28154 u32 cp;
28155
28156+ memset(&arg64, 0, sizeof(arg64));
28157+
28158 err = 0;
28159 err |=
28160 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28161@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
28162 /* Wait (up to 20 seconds) for a command to complete */
28163
28164 for (i = 20 * HZ; i > 0; i--) {
28165- done = hba[ctlr]->access.command_completed(hba[ctlr]);
28166+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
28167 if (done == FIFO_EMPTY)
28168 schedule_timeout_uninterruptible(1);
28169 else
28170@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
28171 resend_cmd1:
28172
28173 /* Disable interrupt on the board. */
28174- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28175+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28176
28177 /* Make sure there is room in the command FIFO */
28178 /* Actually it should be completely empty at this time */
28179@@ -2884,13 +2886,13 @@ resend_cmd1:
28180 /* tape side of the driver. */
28181 for (i = 200000; i > 0; i--) {
28182 /* if fifo isn't full go */
28183- if (!(h->access.fifo_full(h)))
28184+ if (!(h->access->fifo_full(h)))
28185 break;
28186 udelay(10);
28187 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
28188 " waiting!\n", h->ctlr);
28189 }
28190- h->access.submit_command(h, c); /* Send the cmd */
28191+ h->access->submit_command(h, c); /* Send the cmd */
28192 do {
28193 complete = pollcomplete(h->ctlr);
28194
28195@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
28196 while (!hlist_empty(&h->reqQ)) {
28197 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
28198 /* can't do anything if fifo is full */
28199- if ((h->access.fifo_full(h))) {
28200+ if ((h->access->fifo_full(h))) {
28201 printk(KERN_WARNING "cciss: fifo full\n");
28202 break;
28203 }
28204@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
28205 h->Qdepth--;
28206
28207 /* Tell the controller execute command */
28208- h->access.submit_command(h, c);
28209+ h->access->submit_command(h, c);
28210
28211 /* Put job onto the completed Q */
28212 addQ(&h->cmpQ, c);
28213@@ -3393,17 +3395,17 @@ startio:
28214
28215 static inline unsigned long get_next_completion(ctlr_info_t *h)
28216 {
28217- return h->access.command_completed(h);
28218+ return h->access->command_completed(h);
28219 }
28220
28221 static inline int interrupt_pending(ctlr_info_t *h)
28222 {
28223- return h->access.intr_pending(h);
28224+ return h->access->intr_pending(h);
28225 }
28226
28227 static inline long interrupt_not_for_us(ctlr_info_t *h)
28228 {
28229- return (((h->access.intr_pending(h) == 0) ||
28230+ return (((h->access->intr_pending(h) == 0) ||
28231 (h->interrupts_enabled == 0)));
28232 }
28233
28234@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
28235 */
28236 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
28237 c->product_name = products[prod_index].product_name;
28238- c->access = *(products[prod_index].access);
28239+ c->access = products[prod_index].access;
28240 c->nr_cmds = c->max_commands - 4;
28241 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
28242 (readb(&c->cfgtable->Signature[1]) != 'I') ||
28243@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
28244 }
28245
28246 /* make sure the board interrupts are off */
28247- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
28248+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
28249 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
28250 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
28251 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
28252@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
28253 cciss_scsi_setup(i);
28254
28255 /* Turn the interrupts on so we can service requests */
28256- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
28257+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
28258
28259 /* Get the firmware version */
28260 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28261diff -urNp linux-2.6.32.48/drivers/block/cciss.h linux-2.6.32.48/drivers/block/cciss.h
28262--- linux-2.6.32.48/drivers/block/cciss.h 2011-11-08 19:02:43.000000000 -0500
28263+++ linux-2.6.32.48/drivers/block/cciss.h 2011-11-15 19:59:43.000000000 -0500
28264@@ -90,7 +90,7 @@ struct ctlr_info
28265 // information about each logical volume
28266 drive_info_struct *drv[CISS_MAX_LUN];
28267
28268- struct access_method access;
28269+ struct access_method *access;
28270
28271 /* queue and queue Info */
28272 struct hlist_head reqQ;
28273diff -urNp linux-2.6.32.48/drivers/block/cpqarray.c linux-2.6.32.48/drivers/block/cpqarray.c
28274--- linux-2.6.32.48/drivers/block/cpqarray.c 2011-11-08 19:02:43.000000000 -0500
28275+++ linux-2.6.32.48/drivers/block/cpqarray.c 2011-11-15 19:59:43.000000000 -0500
28276@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
28277 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28278 goto Enomem4;
28279 }
28280- hba[i]->access.set_intr_mask(hba[i], 0);
28281+ hba[i]->access->set_intr_mask(hba[i], 0);
28282 if (request_irq(hba[i]->intr, do_ida_intr,
28283 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28284 {
28285@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
28286 add_timer(&hba[i]->timer);
28287
28288 /* Enable IRQ now that spinlock and rate limit timer are set up */
28289- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28290+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28291
28292 for(j=0; j<NWD; j++) {
28293 struct gendisk *disk = ida_gendisk[i][j];
28294@@ -695,7 +695,7 @@ DBGINFO(
28295 for(i=0; i<NR_PRODUCTS; i++) {
28296 if (board_id == products[i].board_id) {
28297 c->product_name = products[i].product_name;
28298- c->access = *(products[i].access);
28299+ c->access = products[i].access;
28300 break;
28301 }
28302 }
28303@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
28304 hba[ctlr]->intr = intr;
28305 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28306 hba[ctlr]->product_name = products[j].product_name;
28307- hba[ctlr]->access = *(products[j].access);
28308+ hba[ctlr]->access = products[j].access;
28309 hba[ctlr]->ctlr = ctlr;
28310 hba[ctlr]->board_id = board_id;
28311 hba[ctlr]->pci_dev = NULL; /* not PCI */
28312@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
28313 struct scatterlist tmp_sg[SG_MAX];
28314 int i, dir, seg;
28315
28316+ pax_track_stack();
28317+
28318 if (blk_queue_plugged(q))
28319 goto startio;
28320
28321@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
28322
28323 while((c = h->reqQ) != NULL) {
28324 /* Can't do anything if we're busy */
28325- if (h->access.fifo_full(h) == 0)
28326+ if (h->access->fifo_full(h) == 0)
28327 return;
28328
28329 /* Get the first entry from the request Q */
28330@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
28331 h->Qdepth--;
28332
28333 /* Tell the controller to do our bidding */
28334- h->access.submit_command(h, c);
28335+ h->access->submit_command(h, c);
28336
28337 /* Get onto the completion Q */
28338 addQ(&h->cmpQ, c);
28339@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
28340 unsigned long flags;
28341 __u32 a,a1;
28342
28343- istat = h->access.intr_pending(h);
28344+ istat = h->access->intr_pending(h);
28345 /* Is this interrupt for us? */
28346 if (istat == 0)
28347 return IRQ_NONE;
28348@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
28349 */
28350 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28351 if (istat & FIFO_NOT_EMPTY) {
28352- while((a = h->access.command_completed(h))) {
28353+ while((a = h->access->command_completed(h))) {
28354 a1 = a; a &= ~3;
28355 if ((c = h->cmpQ) == NULL)
28356 {
28357@@ -1434,11 +1436,11 @@ static int sendcmd(
28358 /*
28359 * Disable interrupt
28360 */
28361- info_p->access.set_intr_mask(info_p, 0);
28362+ info_p->access->set_intr_mask(info_p, 0);
28363 /* Make sure there is room in the command FIFO */
28364 /* Actually it should be completely empty at this time. */
28365 for (i = 200000; i > 0; i--) {
28366- temp = info_p->access.fifo_full(info_p);
28367+ temp = info_p->access->fifo_full(info_p);
28368 if (temp != 0) {
28369 break;
28370 }
28371@@ -1451,7 +1453,7 @@ DBG(
28372 /*
28373 * Send the cmd
28374 */
28375- info_p->access.submit_command(info_p, c);
28376+ info_p->access->submit_command(info_p, c);
28377 complete = pollcomplete(ctlr);
28378
28379 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28380@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
28381 * we check the new geometry. Then turn interrupts back on when
28382 * we're done.
28383 */
28384- host->access.set_intr_mask(host, 0);
28385+ host->access->set_intr_mask(host, 0);
28386 getgeometry(ctlr);
28387- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28388+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28389
28390 for(i=0; i<NWD; i++) {
28391 struct gendisk *disk = ida_gendisk[ctlr][i];
28392@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
28393 /* Wait (up to 2 seconds) for a command to complete */
28394
28395 for (i = 200000; i > 0; i--) {
28396- done = hba[ctlr]->access.command_completed(hba[ctlr]);
28397+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
28398 if (done == 0) {
28399 udelay(10); /* a short fixed delay */
28400 } else
28401diff -urNp linux-2.6.32.48/drivers/block/cpqarray.h linux-2.6.32.48/drivers/block/cpqarray.h
28402--- linux-2.6.32.48/drivers/block/cpqarray.h 2011-11-08 19:02:43.000000000 -0500
28403+++ linux-2.6.32.48/drivers/block/cpqarray.h 2011-11-15 19:59:43.000000000 -0500
28404@@ -99,7 +99,7 @@ struct ctlr_info {
28405 drv_info_t drv[NWD];
28406 struct proc_dir_entry *proc;
28407
28408- struct access_method access;
28409+ struct access_method *access;
28410
28411 cmdlist_t *reqQ;
28412 cmdlist_t *cmpQ;
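The cciss and cpqarray hunks above change the per-controller access field from an embedded copy of struct access_method (the submit_command/fifo_full/intr_pending/command_completed jump table) into a pointer to the board's table, and every h->access.fn(...) call site becomes h->access->fn(...). Holding just a pointer lets the board tables stay as single shared objects that can be kept read-only, instead of being copied into every writable ctlr_info at probe time. A small userspace sketch of the before/after shape; the members and functions are invented for illustration:

    #include <stdio.h>

    struct access_method {
            void (*submit_command)(int ctlr);
            int  (*intr_pending)(int ctlr);
    };

    static void smart_submit(int ctlr)  { printf("submit on ctlr %d\n", ctlr); }
    static int  smart_pending(int ctlr) { (void)ctlr; return 0; }

    /* One shared, read-only table per board family ... */
    static const struct access_method smart_access = {
            .submit_command = smart_submit,
            .intr_pending   = smart_pending,
    };

    struct ctlr_info {
            /* ... instead of 'struct access_method access;' (a private,
             * writable copy), keep only a pointer to the shared table. */
            const struct access_method *access;
    };

    int main(void)
    {
            struct ctlr_info h = { .access = &smart_access };

            h.access->submit_command(0);    /* was: h.access.submit_command(0) */
            return h.access->intr_pending(0);
    }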
28413diff -urNp linux-2.6.32.48/drivers/block/DAC960.c linux-2.6.32.48/drivers/block/DAC960.c
28414--- linux-2.6.32.48/drivers/block/DAC960.c 2011-11-08 19:02:43.000000000 -0500
28415+++ linux-2.6.32.48/drivers/block/DAC960.c 2011-11-15 19:59:43.000000000 -0500
28416@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
28417 unsigned long flags;
28418 int Channel, TargetID;
28419
28420+ pax_track_stack();
28421+
28422 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
28423 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
28424 sizeof(DAC960_SCSI_Inquiry_T) +
28425diff -urNp linux-2.6.32.48/drivers/block/loop.c linux-2.6.32.48/drivers/block/loop.c
28426--- linux-2.6.32.48/drivers/block/loop.c 2011-11-08 19:02:43.000000000 -0500
28427+++ linux-2.6.32.48/drivers/block/loop.c 2011-11-15 19:59:43.000000000 -0500
28428@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct fil
28429 mm_segment_t old_fs = get_fs();
28430
28431 set_fs(get_ds());
28432- bw = file->f_op->write(file, buf, len, &pos);
28433+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28434 set_fs(old_fs);
28435 if (likely(bw == len))
28436 return 0;
28437diff -urNp linux-2.6.32.48/drivers/block/nbd.c linux-2.6.32.48/drivers/block/nbd.c
28438--- linux-2.6.32.48/drivers/block/nbd.c 2011-11-08 19:02:43.000000000 -0500
28439+++ linux-2.6.32.48/drivers/block/nbd.c 2011-11-15 19:59:43.000000000 -0500
28440@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
28441 struct kvec iov;
28442 sigset_t blocked, oldset;
28443
28444+ pax_track_stack();
28445+
28446 if (unlikely(!sock)) {
28447 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
28448 lo->disk->disk_name, (send ? "send" : "recv"));
28449@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
28450 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
28451 unsigned int cmd, unsigned long arg)
28452 {
28453+ pax_track_stack();
28454+
28455 switch (cmd) {
28456 case NBD_DISCONNECT: {
28457 struct request sreq;
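solos-pci's print_buffer(), cpqarray's do_ida_request(), the DAC960 device-configuration reader and nbd's sock_xmit()/__nbd_ioctl() each gain a pax_track_stack() call because they put unusually large objects on the kernel stack (a 500-byte message buffer, SG_MAX scatterlists, per-channel DMA and inquiry state, a sigset plus kvec and request). pax_track_stack() is the PaX STACKLEAK helper that records how deep the current call chain has reached so the used portion of the kernel stack can be cleared before returning to userspace, keeping stale stack data from leaking later. There is no real userspace equivalent; the sketch below only shows the shape of such instrumentation with a hypothetical depth-tracking hook:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for pax_track_stack(): remember the deepest
     * (lowest, assuming a downward-growing stack) address any instrumented
     * function has reached, so that region could later be wiped. */
    static uintptr_t lowest_sp = UINTPTR_MAX;

    static void track_stack(void)
    {
            char marker;
            uintptr_t sp = (uintptr_t)&marker;

            if (sp < lowest_sp)
                    lowest_sp = sp;
    }

    static void handler_with_big_locals(void)
    {
            char msg[500];          /* large on-stack buffer, as in print_buffer() */

            track_stack();          /* functions like this get the extra call */
            snprintf(msg, sizeof(msg), "frame dump goes here");
            puts(msg);
    }

    int main(void)
    {
            handler_with_big_locals();
            printf("deepest stack address seen: %#lx\n", (unsigned long)lowest_sp);
            return 0;
    }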
28458diff -urNp linux-2.6.32.48/drivers/block/pktcdvd.c linux-2.6.32.48/drivers/block/pktcdvd.c
28459--- linux-2.6.32.48/drivers/block/pktcdvd.c 2011-11-08 19:02:43.000000000 -0500
28460+++ linux-2.6.32.48/drivers/block/pktcdvd.c 2011-11-15 19:59:43.000000000 -0500
28461@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
28462 return len;
28463 }
28464
28465-static struct sysfs_ops kobj_pkt_ops = {
28466+static const struct sysfs_ops kobj_pkt_ops = {
28467 .show = kobj_pkt_show,
28468 .store = kobj_pkt_store
28469 };
28470diff -urNp linux-2.6.32.48/drivers/char/agp/frontend.c linux-2.6.32.48/drivers/char/agp/frontend.c
28471--- linux-2.6.32.48/drivers/char/agp/frontend.c 2011-11-08 19:02:43.000000000 -0500
28472+++ linux-2.6.32.48/drivers/char/agp/frontend.c 2011-11-15 19:59:43.000000000 -0500
28473@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
28474 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28475 return -EFAULT;
28476
28477- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28478+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28479 return -EFAULT;
28480
28481 client = agp_find_client_by_pid(reserve.pid);
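The agp frontend hunk above recomputes the overflow guard on the user-supplied reserve.seg_count against struct agp_segment_priv rather than struct agp_segment, so the bound matches the larger per-segment object the driver actually works with and a count that passes the check cannot overflow a later size calculation. The general rule it enforces is sketched below in standalone form; the struct names are illustrative:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct wire_segment { uint32_t pg_start, pg_count, prot; };    /* what userspace sends */
    struct priv_segment { struct wire_segment seg; void *extra; }; /* what we allocate */

    static struct priv_segment *alloc_segments(size_t seg_count)
    {
            /* Bound the count against the struct being allocated, so that
             * seg_count * sizeof(struct priv_segment) cannot overflow. */
            if (seg_count >= SIZE_MAX / sizeof(struct priv_segment))
                    return NULL;                    /* -EFAULT in the driver */
            return calloc(seg_count, sizeof(struct priv_segment));
    }

    int main(void)
    {
            struct priv_segment *segs = alloc_segments(16);

            printf("%s\n", segs ? "allocated" : "rejected");
            free(segs);
            return 0;
    }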
28482diff -urNp linux-2.6.32.48/drivers/char/briq_panel.c linux-2.6.32.48/drivers/char/briq_panel.c
28483--- linux-2.6.32.48/drivers/char/briq_panel.c 2011-11-08 19:02:43.000000000 -0500
28484+++ linux-2.6.32.48/drivers/char/briq_panel.c 2011-11-15 19:59:43.000000000 -0500
28485@@ -10,6 +10,7 @@
28486 #include <linux/types.h>
28487 #include <linux/errno.h>
28488 #include <linux/tty.h>
28489+#include <linux/mutex.h>
28490 #include <linux/timer.h>
28491 #include <linux/kernel.h>
28492 #include <linux/wait.h>
28493@@ -36,6 +37,7 @@ static int vfd_is_open;
28494 static unsigned char vfd[40];
28495 static int vfd_cursor;
28496 static unsigned char ledpb, led;
28497+static DEFINE_MUTEX(vfd_mutex);
28498
28499 static void update_vfd(void)
28500 {
28501@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
28502 if (!vfd_is_open)
28503 return -EBUSY;
28504
28505+ mutex_lock(&vfd_mutex);
28506 for (;;) {
28507 char c;
28508 if (!indx)
28509 break;
28510- if (get_user(c, buf))
28511+ if (get_user(c, buf)) {
28512+ mutex_unlock(&vfd_mutex);
28513 return -EFAULT;
28514+ }
28515 if (esc) {
28516 set_led(c);
28517 esc = 0;
28518@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
28519 buf++;
28520 }
28521 update_vfd();
28522+ mutex_unlock(&vfd_mutex);
28523
28524 return len;
28525 }
28526diff -urNp linux-2.6.32.48/drivers/char/genrtc.c linux-2.6.32.48/drivers/char/genrtc.c
28527--- linux-2.6.32.48/drivers/char/genrtc.c 2011-11-08 19:02:43.000000000 -0500
28528+++ linux-2.6.32.48/drivers/char/genrtc.c 2011-11-15 19:59:43.000000000 -0500
28529@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
28530 switch (cmd) {
28531
28532 case RTC_PLL_GET:
28533+ memset(&pll, 0, sizeof(pll));
28534 if (get_rtc_pll(&pll))
28535 return -EINVAL;
28536 else
28537diff -urNp linux-2.6.32.48/drivers/char/hpet.c linux-2.6.32.48/drivers/char/hpet.c
28538--- linux-2.6.32.48/drivers/char/hpet.c 2011-11-08 19:02:43.000000000 -0500
28539+++ linux-2.6.32.48/drivers/char/hpet.c 2011-11-15 19:59:43.000000000 -0500
28540@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
28541 return 0;
28542 }
28543
28544-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
28545+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
28546
28547 static int
28548 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
28549@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
28550 }
28551
28552 static int
28553-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
28554+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
28555 {
28556 struct hpet_timer __iomem *timer;
28557 struct hpet __iomem *hpet;
28558@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
28559 {
28560 struct hpet_info info;
28561
28562+ memset(&info, 0, sizeof(info));
28563+
28564 if (devp->hd_ireqfreq)
28565 info.hi_ireqfreq =
28566 hpet_time_div(hpetp, devp->hd_ireqfreq);
28567- else
28568- info.hi_ireqfreq = 0;
28569 info.hi_flags =
28570 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
28571 info.hi_hpet = hpetp->hp_which;
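The hpet_ioctl_common hunk zeroes the whole hpet_info structure before filling it, so compiler-inserted padding and any fields left unset cannot leak stale kernel stack bytes when the structure is later copied out to user space (the genrtc.c hunk above does the same for the RTC PLL data). The following user-space sketch, with a made-up struct, shows why the memset matters:

#include <stdio.h>
#include <string.h>

/* Hypothetical info struct with implicit padding after 'flag'. */
struct info {
    char flag;     /* followed by padding bytes up to the alignment of 'value' */
    long value;
};

static void fill_no_memset(struct info *i)
{
    i->flag  = 1;
    i->value = 123;        /* padding bytes keep whatever was there before */
}

static void fill_with_memset(struct info *i)
{
    memset(i, 0, sizeof(*i));   /* clears padding and any skipped fields */
    i->flag  = 1;
    i->value = 123;
}

static void dump(const char *tag, const struct info *i)
{
    const unsigned char *p = (const unsigned char *)i;
    printf("%s:", tag);
    for (size_t n = 0; n < sizeof(*i); n++)
        printf(" %02x", p[n]);
    printf("\n");
}

int main(void)
{
    struct info i;

    memset(&i, 0xaa, sizeof(i));   /* simulate stale stack contents */
    fill_no_memset(&i);
    dump("without memset", &i);    /* padding may still read back as 0xaa */

    memset(&i, 0xaa, sizeof(i));
    fill_with_memset(&i);
    dump("with memset   ", &i);    /* every byte is now defined */
    return 0;
}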
28572diff -urNp linux-2.6.32.48/drivers/char/hvc_beat.c linux-2.6.32.48/drivers/char/hvc_beat.c
28573--- linux-2.6.32.48/drivers/char/hvc_beat.c 2011-11-08 19:02:43.000000000 -0500
28574+++ linux-2.6.32.48/drivers/char/hvc_beat.c 2011-11-15 19:59:43.000000000 -0500
28575@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
28576 return cnt;
28577 }
28578
28579-static struct hv_ops hvc_beat_get_put_ops = {
28580+static const struct hv_ops hvc_beat_get_put_ops = {
28581 .get_chars = hvc_beat_get_chars,
28582 .put_chars = hvc_beat_put_chars,
28583 };
28584diff -urNp linux-2.6.32.48/drivers/char/hvc_console.c linux-2.6.32.48/drivers/char/hvc_console.c
28585--- linux-2.6.32.48/drivers/char/hvc_console.c 2011-11-08 19:02:43.000000000 -0500
28586+++ linux-2.6.32.48/drivers/char/hvc_console.c 2011-11-15 19:59:43.000000000 -0500
28587@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
28588 * console interfaces but can still be used as a tty device. This has to be
28589 * static because kmalloc will not work during early console init.
28590 */
28591-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
28592+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
28593 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
28594 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
28595
28596@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kr
28597 * vty adapters do NOT get an hvc_instantiate() callback since they
28598 * appear after early console init.
28599 */
28600-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
28601+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
28602 {
28603 struct hvc_struct *hp;
28604
28605@@ -758,7 +758,7 @@ static const struct tty_operations hvc_o
28606 };
28607
28608 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
28609- struct hv_ops *ops, int outbuf_size)
28610+ const struct hv_ops *ops, int outbuf_size)
28611 {
28612 struct hvc_struct *hp;
28613 int i;
28614diff -urNp linux-2.6.32.48/drivers/char/hvc_console.h linux-2.6.32.48/drivers/char/hvc_console.h
28615--- linux-2.6.32.48/drivers/char/hvc_console.h 2011-11-08 19:02:43.000000000 -0500
28616+++ linux-2.6.32.48/drivers/char/hvc_console.h 2011-11-15 19:59:43.000000000 -0500
28617@@ -55,7 +55,7 @@ struct hvc_struct {
28618 int outbuf_size;
28619 int n_outbuf;
28620 uint32_t vtermno;
28621- struct hv_ops *ops;
28622+ const struct hv_ops *ops;
28623 int irq_requested;
28624 int data;
28625 struct winsize ws;
28626@@ -76,11 +76,11 @@ struct hv_ops {
28627 };
28628
28629 /* Register a vterm and a slot index for use as a console (console_init) */
28630-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
28631+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
28632
28633 /* register a vterm for hvc tty operation (module_init or hotplug add) */
28634 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
28635- struct hv_ops *ops, int outbuf_size);
28636+ const struct hv_ops *ops, int outbuf_size);
28637 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
28638 extern int hvc_remove(struct hvc_struct *hp);
28639
28640diff -urNp linux-2.6.32.48/drivers/char/hvc_iseries.c linux-2.6.32.48/drivers/char/hvc_iseries.c
28641--- linux-2.6.32.48/drivers/char/hvc_iseries.c 2011-11-08 19:02:43.000000000 -0500
28642+++ linux-2.6.32.48/drivers/char/hvc_iseries.c 2011-11-15 19:59:43.000000000 -0500
28643@@ -197,7 +197,7 @@ done:
28644 return sent;
28645 }
28646
28647-static struct hv_ops hvc_get_put_ops = {
28648+static const struct hv_ops hvc_get_put_ops = {
28649 .get_chars = get_chars,
28650 .put_chars = put_chars,
28651 .notifier_add = notifier_add_irq,
28652diff -urNp linux-2.6.32.48/drivers/char/hvc_iucv.c linux-2.6.32.48/drivers/char/hvc_iucv.c
28653--- linux-2.6.32.48/drivers/char/hvc_iucv.c 2011-11-08 19:02:43.000000000 -0500
28654+++ linux-2.6.32.48/drivers/char/hvc_iucv.c 2011-11-15 19:59:43.000000000 -0500
28655@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
28656
28657
28658 /* HVC operations */
28659-static struct hv_ops hvc_iucv_ops = {
28660+static const struct hv_ops hvc_iucv_ops = {
28661 .get_chars = hvc_iucv_get_chars,
28662 .put_chars = hvc_iucv_put_chars,
28663 .notifier_add = hvc_iucv_notifier_add,
28664diff -urNp linux-2.6.32.48/drivers/char/hvc_rtas.c linux-2.6.32.48/drivers/char/hvc_rtas.c
28665--- linux-2.6.32.48/drivers/char/hvc_rtas.c 2011-11-08 19:02:43.000000000 -0500
28666+++ linux-2.6.32.48/drivers/char/hvc_rtas.c 2011-11-15 19:59:43.000000000 -0500
28667@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
28668 return i;
28669 }
28670
28671-static struct hv_ops hvc_rtas_get_put_ops = {
28672+static const struct hv_ops hvc_rtas_get_put_ops = {
28673 .get_chars = hvc_rtas_read_console,
28674 .put_chars = hvc_rtas_write_console,
28675 };
28676diff -urNp linux-2.6.32.48/drivers/char/hvcs.c linux-2.6.32.48/drivers/char/hvcs.c
28677--- linux-2.6.32.48/drivers/char/hvcs.c 2011-11-08 19:02:43.000000000 -0500
28678+++ linux-2.6.32.48/drivers/char/hvcs.c 2011-11-15 19:59:43.000000000 -0500
28679@@ -82,6 +82,7 @@
28680 #include <asm/hvcserver.h>
28681 #include <asm/uaccess.h>
28682 #include <asm/vio.h>
28683+#include <asm/local.h>
28684
28685 /*
28686 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
28687@@ -269,7 +270,7 @@ struct hvcs_struct {
28688 unsigned int index;
28689
28690 struct tty_struct *tty;
28691- int open_count;
28692+ local_t open_count;
28693
28694 /*
28695 * Used to tell the driver kernel_thread what operations need to take
28696@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
28697
28698 spin_lock_irqsave(&hvcsd->lock, flags);
28699
28700- if (hvcsd->open_count > 0) {
28701+ if (local_read(&hvcsd->open_count) > 0) {
28702 spin_unlock_irqrestore(&hvcsd->lock, flags);
28703 printk(KERN_INFO "HVCS: vterm state unchanged. "
28704 "The hvcs device node is still in use.\n");
28705@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
28706 if ((retval = hvcs_partner_connect(hvcsd)))
28707 goto error_release;
28708
28709- hvcsd->open_count = 1;
28710+ local_set(&hvcsd->open_count, 1);
28711 hvcsd->tty = tty;
28712 tty->driver_data = hvcsd;
28713
28714@@ -1169,7 +1170,7 @@ fast_open:
28715
28716 spin_lock_irqsave(&hvcsd->lock, flags);
28717 kref_get(&hvcsd->kref);
28718- hvcsd->open_count++;
28719+ local_inc(&hvcsd->open_count);
28720 hvcsd->todo_mask |= HVCS_SCHED_READ;
28721 spin_unlock_irqrestore(&hvcsd->lock, flags);
28722
28723@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
28724 hvcsd = tty->driver_data;
28725
28726 spin_lock_irqsave(&hvcsd->lock, flags);
28727- if (--hvcsd->open_count == 0) {
28728+ if (local_dec_and_test(&hvcsd->open_count)) {
28729
28730 vio_disable_interrupts(hvcsd->vdev);
28731
28732@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
28733 free_irq(irq, hvcsd);
28734 kref_put(&hvcsd->kref, destroy_hvcs_struct);
28735 return;
28736- } else if (hvcsd->open_count < 0) {
28737+ } else if (local_read(&hvcsd->open_count) < 0) {
28738 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
28739 " is missmanaged.\n",
28740- hvcsd->vdev->unit_address, hvcsd->open_count);
28741+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
28742 }
28743
28744 spin_unlock_irqrestore(&hvcsd->lock, flags);
28745@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
28746
28747 spin_lock_irqsave(&hvcsd->lock, flags);
28748 /* Preserve this so that we know how many kref refs to put */
28749- temp_open_count = hvcsd->open_count;
28750+ temp_open_count = local_read(&hvcsd->open_count);
28751
28752 /*
28753 * Don't kref put inside the spinlock because the destruction
28754@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
28755 hvcsd->tty->driver_data = NULL;
28756 hvcsd->tty = NULL;
28757
28758- hvcsd->open_count = 0;
28759+ local_set(&hvcsd->open_count, 0);
28760
28761 /* This will drop any buffered data on the floor which is OK in a hangup
28762 * scenario. */
28763@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
28764 * the middle of a write operation? This is a crummy place to do this
28765 * but we want to keep it all in the spinlock.
28766 */
28767- if (hvcsd->open_count <= 0) {
28768+ if (local_read(&hvcsd->open_count) <= 0) {
28769 spin_unlock_irqrestore(&hvcsd->lock, flags);
28770 return -ENODEV;
28771 }
28772@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
28773 {
28774 struct hvcs_struct *hvcsd = tty->driver_data;
28775
28776- if (!hvcsd || hvcsd->open_count <= 0)
28777+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
28778 return 0;
28779
28780 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
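The hvcs hunks replace the plain int open_count, which several paths read without the spinlock held, with local_t and local_read()/local_inc()/local_dec_and_test(), so concurrent open/close cannot lose updates. A rough user-space analogue using C11 atomics (illustrative only; the kernel's local_t is a different, per-CPU-oriented primitive, this just shows the read-modify-write concern):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int open_count;          /* analogue of local_t open_count */

static void device_open(void)  { atomic_fetch_add(&open_count, 1); }

static int device_close(void)  /* returns 1 when the last user is gone */
{
    return atomic_fetch_sub(&open_count, 1) == 1;
}

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        device_open();
        device_close();
    }
    return NULL;
}

int main(void)
{
    pthread_t t[4];
    for (int i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, worker, NULL);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);

    /* With atomics this always prints 0; with a plain int it may not. */
    printf("open_count = %d\n", atomic_load(&open_count));
    return 0;
}

Build with -pthread; the same program with a plain int counter can end with a nonzero count, which is the class of bug the local_t conversion removes.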
28781diff -urNp linux-2.6.32.48/drivers/char/hvc_udbg.c linux-2.6.32.48/drivers/char/hvc_udbg.c
28782--- linux-2.6.32.48/drivers/char/hvc_udbg.c 2011-11-08 19:02:43.000000000 -0500
28783+++ linux-2.6.32.48/drivers/char/hvc_udbg.c 2011-11-15 19:59:43.000000000 -0500
28784@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
28785 return i;
28786 }
28787
28788-static struct hv_ops hvc_udbg_ops = {
28789+static const struct hv_ops hvc_udbg_ops = {
28790 .get_chars = hvc_udbg_get,
28791 .put_chars = hvc_udbg_put,
28792 };
28793diff -urNp linux-2.6.32.48/drivers/char/hvc_vio.c linux-2.6.32.48/drivers/char/hvc_vio.c
28794--- linux-2.6.32.48/drivers/char/hvc_vio.c 2011-11-08 19:02:43.000000000 -0500
28795+++ linux-2.6.32.48/drivers/char/hvc_vio.c 2011-11-15 19:59:43.000000000 -0500
28796@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
28797 return got;
28798 }
28799
28800-static struct hv_ops hvc_get_put_ops = {
28801+static const struct hv_ops hvc_get_put_ops = {
28802 .get_chars = filtered_get_chars,
28803 .put_chars = hvc_put_chars,
28804 .notifier_add = notifier_add_irq,
28805diff -urNp linux-2.6.32.48/drivers/char/hvc_xen.c linux-2.6.32.48/drivers/char/hvc_xen.c
28806--- linux-2.6.32.48/drivers/char/hvc_xen.c 2011-11-08 19:02:43.000000000 -0500
28807+++ linux-2.6.32.48/drivers/char/hvc_xen.c 2011-11-15 19:59:43.000000000 -0500
28808@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
28809 return recv;
28810 }
28811
28812-static struct hv_ops hvc_ops = {
28813+static const struct hv_ops hvc_ops = {
28814 .get_chars = read_console,
28815 .put_chars = write_console,
28816 .notifier_add = notifier_add_irq,
28817diff -urNp linux-2.6.32.48/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.48/drivers/char/ipmi/ipmi_msghandler.c
28818--- linux-2.6.32.48/drivers/char/ipmi/ipmi_msghandler.c 2011-11-08 19:02:43.000000000 -0500
28819+++ linux-2.6.32.48/drivers/char/ipmi/ipmi_msghandler.c 2011-11-15 19:59:43.000000000 -0500
28820@@ -414,7 +414,7 @@ struct ipmi_smi {
28821 struct proc_dir_entry *proc_dir;
28822 char proc_dir_name[10];
28823
28824- atomic_t stats[IPMI_NUM_STATS];
28825+ atomic_unchecked_t stats[IPMI_NUM_STATS];
28826
28827 /*
28828 * run_to_completion duplicate of smb_info, smi_info
28829@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28830
28831
28832 #define ipmi_inc_stat(intf, stat) \
28833- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28834+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28835 #define ipmi_get_stat(intf, stat) \
28836- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28837+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28838
28839 static int is_lan_addr(struct ipmi_addr *addr)
28840 {
28841@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
28842 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28843 init_waitqueue_head(&intf->waitq);
28844 for (i = 0; i < IPMI_NUM_STATS; i++)
28845- atomic_set(&intf->stats[i], 0);
28846+ atomic_set_unchecked(&intf->stats[i], 0);
28847
28848 intf->proc_dir = NULL;
28849
28850@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
28851 struct ipmi_smi_msg smi_msg;
28852 struct ipmi_recv_msg recv_msg;
28853
28854+ pax_track_stack();
28855+
28856 si = (struct ipmi_system_interface_addr *) &addr;
28857 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
28858 si->channel = IPMI_BMC_CHANNEL;
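The IPMI hunks (and the EDAC ones below) switch pure statistics counters from atomic_t to atomic_unchecked_t. Under PaX's refcount-overflow protection, plain atomic_t increments are instrumented to catch wraparound, so counters that are allowed to wrap, such as these stats, are moved to the unchecked variants to opt out. A conceptual user-space sketch of the two policies (PaX reacts via a fault handler rather than saturating; this is only an analogy):

#include <stdio.h>
#include <limits.h>

/* "Checked" increment: treat wraparound as a bug (refcount-style). */
static unsigned int inc_checked(unsigned int v)
{
    if (v == UINT_MAX) {
        fprintf(stderr, "refcount overflow detected\n");
        return v;                 /* saturate instead of wrapping */
    }
    return v + 1;
}

/* "Unchecked" increment: wraparound is harmless (statistics-style). */
static unsigned int inc_unchecked(unsigned int v)
{
    return v + 1;                 /* may wrap to 0, which is acceptable here */
}

int main(void)
{
    printf("checked   at UINT_MAX -> %u\n", inc_checked(UINT_MAX));
    printf("unchecked at UINT_MAX -> %u\n", inc_unchecked(UINT_MAX));
    return 0;
}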
28859diff -urNp linux-2.6.32.48/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.48/drivers/char/ipmi/ipmi_si_intf.c
28860--- linux-2.6.32.48/drivers/char/ipmi/ipmi_si_intf.c 2011-11-08 19:02:43.000000000 -0500
28861+++ linux-2.6.32.48/drivers/char/ipmi/ipmi_si_intf.c 2011-11-15 19:59:43.000000000 -0500
28862@@ -277,7 +277,7 @@ struct smi_info {
28863 unsigned char slave_addr;
28864
28865 /* Counters and things for the proc filesystem. */
28866- atomic_t stats[SI_NUM_STATS];
28867+ atomic_unchecked_t stats[SI_NUM_STATS];
28868
28869 struct task_struct *thread;
28870
28871@@ -285,9 +285,9 @@ struct smi_info {
28872 };
28873
28874 #define smi_inc_stat(smi, stat) \
28875- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28876+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28877 #define smi_get_stat(smi, stat) \
28878- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28879+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28880
28881 #define SI_MAX_PARMS 4
28882
28883@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28884 atomic_set(&new_smi->req_events, 0);
28885 new_smi->run_to_completion = 0;
28886 for (i = 0; i < SI_NUM_STATS; i++)
28887- atomic_set(&new_smi->stats[i], 0);
28888+ atomic_set_unchecked(&new_smi->stats[i], 0);
28889
28890 new_smi->interrupt_disabled = 0;
28891 atomic_set(&new_smi->stop_operation, 0);
28892diff -urNp linux-2.6.32.48/drivers/char/istallion.c linux-2.6.32.48/drivers/char/istallion.c
28893--- linux-2.6.32.48/drivers/char/istallion.c 2011-11-08 19:02:43.000000000 -0500
28894+++ linux-2.6.32.48/drivers/char/istallion.c 2011-11-15 19:59:43.000000000 -0500
28895@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28896 * re-used for each stats call.
28897 */
28898 static comstats_t stli_comstats;
28899-static combrd_t stli_brdstats;
28900 static struct asystats stli_cdkstats;
28901
28902 /*****************************************************************************/
28903@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28904 {
28905 struct stlibrd *brdp;
28906 unsigned int i;
28907+ combrd_t stli_brdstats;
28908
28909 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28910 return -EFAULT;
28911@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28912 struct stliport stli_dummyport;
28913 struct stliport *portp;
28914
28915+ pax_track_stack();
28916+
28917 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28918 return -EFAULT;
28919 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28920@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28921 struct stlibrd stli_dummybrd;
28922 struct stlibrd *brdp;
28923
28924+ pax_track_stack();
28925+
28926 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28927 return -EFAULT;
28928 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28929diff -urNp linux-2.6.32.48/drivers/char/Kconfig linux-2.6.32.48/drivers/char/Kconfig
28930--- linux-2.6.32.48/drivers/char/Kconfig 2011-11-08 19:02:43.000000000 -0500
28931+++ linux-2.6.32.48/drivers/char/Kconfig 2011-11-15 19:59:43.000000000 -0500
28932@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28933
28934 config DEVKMEM
28935 bool "/dev/kmem virtual device support"
28936- default y
28937+ default n
28938+ depends on !GRKERNSEC_KMEM
28939 help
28940 Say Y here if you want to support the /dev/kmem device. The
28941 /dev/kmem device is rarely used, but can be used for certain
28942@@ -1114,6 +1115,7 @@ config DEVPORT
28943 bool
28944 depends on !M68K
28945 depends on ISA || PCI
28946+ depends on !GRKERNSEC_KMEM
28947 default y
28948
28949 source "drivers/s390/char/Kconfig"
28950diff -urNp linux-2.6.32.48/drivers/char/keyboard.c linux-2.6.32.48/drivers/char/keyboard.c
28951--- linux-2.6.32.48/drivers/char/keyboard.c 2011-11-08 19:02:43.000000000 -0500
28952+++ linux-2.6.32.48/drivers/char/keyboard.c 2011-11-15 19:59:43.000000000 -0500
28953@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28954 kbd->kbdmode == VC_MEDIUMRAW) &&
28955 value != KVAL(K_SAK))
28956 return; /* SAK is allowed even in raw mode */
28957+
28958+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28959+ {
28960+ void *func = fn_handler[value];
28961+ if (func == fn_show_state || func == fn_show_ptregs ||
28962+ func == fn_show_mem)
28963+ return;
28964+ }
28965+#endif
28966+
28967 fn_handler[value](vc);
28968 }
28969
28970@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28971 .evbit = { BIT_MASK(EV_SND) },
28972 },
28973
28974- { }, /* Terminating entry */
28975+ { 0 }, /* Terminating entry */
28976 };
28977
28978 MODULE_DEVICE_TABLE(input, kbd_ids);
28979diff -urNp linux-2.6.32.48/drivers/char/mem.c linux-2.6.32.48/drivers/char/mem.c
28980--- linux-2.6.32.48/drivers/char/mem.c 2011-11-08 19:02:43.000000000 -0500
28981+++ linux-2.6.32.48/drivers/char/mem.c 2011-11-15 19:59:43.000000000 -0500
28982@@ -18,6 +18,7 @@
28983 #include <linux/raw.h>
28984 #include <linux/tty.h>
28985 #include <linux/capability.h>
28986+#include <linux/security.h>
28987 #include <linux/ptrace.h>
28988 #include <linux/device.h>
28989 #include <linux/highmem.h>
28990@@ -35,6 +36,10 @@
28991 # include <linux/efi.h>
28992 #endif
28993
28994+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28995+extern struct file_operations grsec_fops;
28996+#endif
28997+
28998 static inline unsigned long size_inside_page(unsigned long start,
28999 unsigned long size)
29000 {
29001@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
29002
29003 while (cursor < to) {
29004 if (!devmem_is_allowed(pfn)) {
29005+#ifdef CONFIG_GRKERNSEC_KMEM
29006+ gr_handle_mem_readwrite(from, to);
29007+#else
29008 printk(KERN_INFO
29009 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29010 current->comm, from, to);
29011+#endif
29012 return 0;
29013 }
29014 cursor += PAGE_SIZE;
29015@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
29016 }
29017 return 1;
29018 }
29019+#elif defined(CONFIG_GRKERNSEC_KMEM)
29020+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29021+{
29022+ return 0;
29023+}
29024 #else
29025 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29026 {
29027@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
29028 #endif
29029
29030 while (count > 0) {
29031+ char *temp;
29032+
29033 /*
29034 * Handle first page in case it's not aligned
29035 */
29036@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
29037 if (!ptr)
29038 return -EFAULT;
29039
29040- if (copy_to_user(buf, ptr, sz)) {
29041+#ifdef CONFIG_PAX_USERCOPY
29042+ temp = kmalloc(sz, GFP_KERNEL);
29043+ if (!temp) {
29044+ unxlate_dev_mem_ptr(p, ptr);
29045+ return -ENOMEM;
29046+ }
29047+ memcpy(temp, ptr, sz);
29048+#else
29049+ temp = ptr;
29050+#endif
29051+
29052+ if (copy_to_user(buf, temp, sz)) {
29053+
29054+#ifdef CONFIG_PAX_USERCOPY
29055+ kfree(temp);
29056+#endif
29057+
29058 unxlate_dev_mem_ptr(p, ptr);
29059 return -EFAULT;
29060 }
29061
29062+#ifdef CONFIG_PAX_USERCOPY
29063+ kfree(temp);
29064+#endif
29065+
29066 unxlate_dev_mem_ptr(p, ptr);
29067
29068 buf += sz;
29069@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
29070 size_t count, loff_t *ppos)
29071 {
29072 unsigned long p = *ppos;
29073- ssize_t low_count, read, sz;
29074+ ssize_t low_count, read, sz, err = 0;
29075 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29076- int err = 0;
29077
29078 read = 0;
29079 if (p < (unsigned long) high_memory) {
29080@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
29081 }
29082 #endif
29083 while (low_count > 0) {
29084+ char *temp;
29085+
29086 sz = size_inside_page(p, low_count);
29087
29088 /*
29089@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
29090 */
29091 kbuf = xlate_dev_kmem_ptr((char *)p);
29092
29093- if (copy_to_user(buf, kbuf, sz))
29094+#ifdef CONFIG_PAX_USERCOPY
29095+ temp = kmalloc(sz, GFP_KERNEL);
29096+ if (!temp)
29097+ return -ENOMEM;
29098+ memcpy(temp, kbuf, sz);
29099+#else
29100+ temp = kbuf;
29101+#endif
29102+
29103+ err = copy_to_user(buf, temp, sz);
29104+
29105+#ifdef CONFIG_PAX_USERCOPY
29106+ kfree(temp);
29107+#endif
29108+
29109+ if (err)
29110 return -EFAULT;
29111 buf += sz;
29112 p += sz;
29113@@ -889,6 +941,9 @@ static const struct memdev {
29114 #ifdef CONFIG_CRASH_DUMP
29115 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29116 #endif
29117+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29118+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29119+#endif
29120 };
29121
29122 static int memory_open(struct inode *inode, struct file *filp)
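Under CONFIG_PAX_USERCOPY the read_mem()/read_kmem() hunks above stage the data in a freshly kmalloc'ed bounce buffer and copy_to_user() from that buffer, so the usercopy checker can validate the requested length against a real, sized heap object rather than an arbitrary ioremap or lowmem pointer. A stand-alone sketch of the bounce-buffer pattern, with user-space stand-ins for kmalloc and copy_to_user (names illustrative):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for copy_to_user(): here it just memcpy()s and reports success. */
static int copy_out(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

/* Copy 'len' bytes from an arbitrary source pointer to 'dst' via a heap
 * bounce buffer, mirroring the CONFIG_PAX_USERCOPY branch in the hunk. */
static int bounce_copy(void *dst, const void *src, size_t len)
{
    char *temp = malloc(len);          /* kmalloc(sz, GFP_KERNEL) analogue */
    if (!temp)
        return -1;                     /* -ENOMEM in the kernel code */

    memcpy(temp, src, len);            /* stage into a properly sized object */
    int err = copy_out(dst, temp, len);
    free(temp);                        /* kfree(temp) on both paths */
    return err;
}

int main(void)
{
    char src[64], dst[64];
    memset(src, 'A', sizeof(src));

    if (bounce_copy(dst, src, sizeof(src)) == 0)
        printf("copied %zu bytes, dst[0]=%c\n", sizeof(dst), dst[0]);
    return 0;
}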
29123diff -urNp linux-2.6.32.48/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.48/drivers/char/pcmcia/ipwireless/tty.c
29124--- linux-2.6.32.48/drivers/char/pcmcia/ipwireless/tty.c 2011-11-08 19:02:43.000000000 -0500
29125+++ linux-2.6.32.48/drivers/char/pcmcia/ipwireless/tty.c 2011-11-15 19:59:43.000000000 -0500
29126@@ -29,6 +29,7 @@
29127 #include <linux/tty_driver.h>
29128 #include <linux/tty_flip.h>
29129 #include <linux/uaccess.h>
29130+#include <asm/local.h>
29131
29132 #include "tty.h"
29133 #include "network.h"
29134@@ -51,7 +52,7 @@ struct ipw_tty {
29135 int tty_type;
29136 struct ipw_network *network;
29137 struct tty_struct *linux_tty;
29138- int open_count;
29139+ local_t open_count;
29140 unsigned int control_lines;
29141 struct mutex ipw_tty_mutex;
29142 int tx_bytes_queued;
29143@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
29144 mutex_unlock(&tty->ipw_tty_mutex);
29145 return -ENODEV;
29146 }
29147- if (tty->open_count == 0)
29148+ if (local_read(&tty->open_count) == 0)
29149 tty->tx_bytes_queued = 0;
29150
29151- tty->open_count++;
29152+ local_inc(&tty->open_count);
29153
29154 tty->linux_tty = linux_tty;
29155 linux_tty->driver_data = tty;
29156@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
29157
29158 static void do_ipw_close(struct ipw_tty *tty)
29159 {
29160- tty->open_count--;
29161-
29162- if (tty->open_count == 0) {
29163+ if (local_dec_return(&tty->open_count) == 0) {
29164 struct tty_struct *linux_tty = tty->linux_tty;
29165
29166 if (linux_tty != NULL) {
29167@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
29168 return;
29169
29170 mutex_lock(&tty->ipw_tty_mutex);
29171- if (tty->open_count == 0) {
29172+ if (local_read(&tty->open_count) == 0) {
29173 mutex_unlock(&tty->ipw_tty_mutex);
29174 return;
29175 }
29176@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
29177 return;
29178 }
29179
29180- if (!tty->open_count) {
29181+ if (!local_read(&tty->open_count)) {
29182 mutex_unlock(&tty->ipw_tty_mutex);
29183 return;
29184 }
29185@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
29186 return -ENODEV;
29187
29188 mutex_lock(&tty->ipw_tty_mutex);
29189- if (!tty->open_count) {
29190+ if (!local_read(&tty->open_count)) {
29191 mutex_unlock(&tty->ipw_tty_mutex);
29192 return -EINVAL;
29193 }
29194@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
29195 if (!tty)
29196 return -ENODEV;
29197
29198- if (!tty->open_count)
29199+ if (!local_read(&tty->open_count))
29200 return -EINVAL;
29201
29202 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
29203@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
29204 if (!tty)
29205 return 0;
29206
29207- if (!tty->open_count)
29208+ if (!local_read(&tty->open_count))
29209 return 0;
29210
29211 return tty->tx_bytes_queued;
29212@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
29213 if (!tty)
29214 return -ENODEV;
29215
29216- if (!tty->open_count)
29217+ if (!local_read(&tty->open_count))
29218 return -EINVAL;
29219
29220 return get_control_lines(tty);
29221@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
29222 if (!tty)
29223 return -ENODEV;
29224
29225- if (!tty->open_count)
29226+ if (!local_read(&tty->open_count))
29227 return -EINVAL;
29228
29229 return set_control_lines(tty, set, clear);
29230@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
29231 if (!tty)
29232 return -ENODEV;
29233
29234- if (!tty->open_count)
29235+ if (!local_read(&tty->open_count))
29236 return -EINVAL;
29237
29238 /* FIXME: Exactly how is the tty object locked here .. */
29239@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
29240 against a parallel ioctl etc */
29241 mutex_lock(&ttyj->ipw_tty_mutex);
29242 }
29243- while (ttyj->open_count)
29244+ while (local_read(&ttyj->open_count))
29245 do_ipw_close(ttyj);
29246 ipwireless_disassociate_network_ttys(network,
29247 ttyj->channel_idx);
29248diff -urNp linux-2.6.32.48/drivers/char/pty.c linux-2.6.32.48/drivers/char/pty.c
29249--- linux-2.6.32.48/drivers/char/pty.c 2011-11-08 19:02:43.000000000 -0500
29250+++ linux-2.6.32.48/drivers/char/pty.c 2011-11-15 19:59:43.000000000 -0500
29251@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
29252 register_sysctl_table(pty_root_table);
29253
29254 /* Now create the /dev/ptmx special device */
29255+ pax_open_kernel();
29256 tty_default_fops(&ptmx_fops);
29257- ptmx_fops.open = ptmx_open;
29258+ *(void **)&ptmx_fops.open = ptmx_open;
29259+ pax_close_kernel();
29260
29261 cdev_init(&ptmx_cdev, &ptmx_fops);
29262 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
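pty.c above (and virtio_console.c further down) now writes into an otherwise read-only ops structure by bracketing the one-time assignment with pax_open_kernel()/pax_close_kernel(). A loose user-space analogue follows, using mprotect() to keep a function-pointer table read-only except during that single assignment; this only illustrates the idea, the kernel mechanism itself does not use mprotect():

#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

struct fops { int (*open)(void); };

static int ptmx_open(void) { return 123; }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* Allocate a whole page for the ops table so it can be mprotect()ed. */
    struct fops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED) { perror("mmap"); return 1; }

    memset(ops, 0, sizeof(*ops));
    mprotect(ops, pagesz, PROT_READ);            /* table is normally RO */

    /* "pax_open_kernel()" analogue: briefly make it writable ... */
    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
    ops->open = ptmx_open;                       /* one-time late binding */
    /* ... and "pax_close_kernel()": back to read-only. */
    mprotect(ops, pagesz, PROT_READ);

    printf("ops->open() = %d\n", ops->open());
    /* ops->open = NULL;   <- would now SIGSEGV: the page is read-only. */
    return 0;
}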
29263diff -urNp linux-2.6.32.48/drivers/char/random.c linux-2.6.32.48/drivers/char/random.c
29264--- linux-2.6.32.48/drivers/char/random.c 2011-11-08 19:02:43.000000000 -0500
29265+++ linux-2.6.32.48/drivers/char/random.c 2011-11-15 19:59:43.000000000 -0500
29266@@ -254,8 +254,13 @@
29267 /*
29268 * Configuration information
29269 */
29270+#ifdef CONFIG_GRKERNSEC_RANDNET
29271+#define INPUT_POOL_WORDS 512
29272+#define OUTPUT_POOL_WORDS 128
29273+#else
29274 #define INPUT_POOL_WORDS 128
29275 #define OUTPUT_POOL_WORDS 32
29276+#endif
29277 #define SEC_XFER_SIZE 512
29278
29279 /*
29280@@ -292,10 +297,17 @@ static struct poolinfo {
29281 int poolwords;
29282 int tap1, tap2, tap3, tap4, tap5;
29283 } poolinfo_table[] = {
29284+#ifdef CONFIG_GRKERNSEC_RANDNET
29285+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29286+ { 512, 411, 308, 208, 104, 1 },
29287+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29288+ { 128, 103, 76, 51, 25, 1 },
29289+#else
29290 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29291 { 128, 103, 76, 51, 25, 1 },
29292 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29293 { 32, 26, 20, 14, 7, 1 },
29294+#endif
29295 #if 0
29296 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29297 { 2048, 1638, 1231, 819, 411, 1 },
29298@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29299 #include <linux/sysctl.h>
29300
29301 static int min_read_thresh = 8, min_write_thresh;
29302-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29303+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29304 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29305 static char sysctl_bootid[16];
29306
29307diff -urNp linux-2.6.32.48/drivers/char/rocket.c linux-2.6.32.48/drivers/char/rocket.c
29308--- linux-2.6.32.48/drivers/char/rocket.c 2011-11-08 19:02:43.000000000 -0500
29309+++ linux-2.6.32.48/drivers/char/rocket.c 2011-11-15 19:59:43.000000000 -0500
29310@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
29311 struct rocket_ports tmp;
29312 int board;
29313
29314+ pax_track_stack();
29315+
29316 if (!retports)
29317 return -EFAULT;
29318 memset(&tmp, 0, sizeof (tmp));
29319diff -urNp linux-2.6.32.48/drivers/char/sonypi.c linux-2.6.32.48/drivers/char/sonypi.c
29320--- linux-2.6.32.48/drivers/char/sonypi.c 2011-11-08 19:02:43.000000000 -0500
29321+++ linux-2.6.32.48/drivers/char/sonypi.c 2011-11-15 19:59:43.000000000 -0500
29322@@ -55,6 +55,7 @@
29323 #include <asm/uaccess.h>
29324 #include <asm/io.h>
29325 #include <asm/system.h>
29326+#include <asm/local.h>
29327
29328 #include <linux/sonypi.h>
29329
29330@@ -491,7 +492,7 @@ static struct sonypi_device {
29331 spinlock_t fifo_lock;
29332 wait_queue_head_t fifo_proc_list;
29333 struct fasync_struct *fifo_async;
29334- int open_count;
29335+ local_t open_count;
29336 int model;
29337 struct input_dev *input_jog_dev;
29338 struct input_dev *input_key_dev;
29339@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
29340 static int sonypi_misc_release(struct inode *inode, struct file *file)
29341 {
29342 mutex_lock(&sonypi_device.lock);
29343- sonypi_device.open_count--;
29344+ local_dec(&sonypi_device.open_count);
29345 mutex_unlock(&sonypi_device.lock);
29346 return 0;
29347 }
29348@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
29349 lock_kernel();
29350 mutex_lock(&sonypi_device.lock);
29351 /* Flush input queue on first open */
29352- if (!sonypi_device.open_count)
29353+ if (!local_read(&sonypi_device.open_count))
29354 kfifo_reset(sonypi_device.fifo);
29355- sonypi_device.open_count++;
29356+ local_inc(&sonypi_device.open_count);
29357 mutex_unlock(&sonypi_device.lock);
29358 unlock_kernel();
29359 return 0;
29360diff -urNp linux-2.6.32.48/drivers/char/stallion.c linux-2.6.32.48/drivers/char/stallion.c
29361--- linux-2.6.32.48/drivers/char/stallion.c 2011-11-08 19:02:43.000000000 -0500
29362+++ linux-2.6.32.48/drivers/char/stallion.c 2011-11-15 19:59:43.000000000 -0500
29363@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
29364 struct stlport stl_dummyport;
29365 struct stlport *portp;
29366
29367+ pax_track_stack();
29368+
29369 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
29370 return -EFAULT;
29371 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
29372diff -urNp linux-2.6.32.48/drivers/char/tpm/tpm_bios.c linux-2.6.32.48/drivers/char/tpm/tpm_bios.c
29373--- linux-2.6.32.48/drivers/char/tpm/tpm_bios.c 2011-11-08 19:02:43.000000000 -0500
29374+++ linux-2.6.32.48/drivers/char/tpm/tpm_bios.c 2011-11-15 19:59:43.000000000 -0500
29375@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
29376 event = addr;
29377
29378 if ((event->event_type == 0 && event->event_size == 0) ||
29379- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29380+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29381 return NULL;
29382
29383 return addr;
29384@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
29385 return NULL;
29386
29387 if ((event->event_type == 0 && event->event_size == 0) ||
29388- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29389+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29390 return NULL;
29391
29392 (*pos)++;
29393@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
29394 int i;
29395
29396 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29397- seq_putc(m, data[i]);
29398+ if (!seq_putc(m, data[i]))
29399+ return -EFAULT;
29400
29401 return 0;
29402 }
29403@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log
29404 log->bios_event_log_end = log->bios_event_log + len;
29405
29406 virt = acpi_os_map_memory(start, len);
29407+ if (!virt) {
29408+ kfree(log->bios_event_log);
29409+ log->bios_event_log = NULL;
29410+ return -EFAULT;
29411+ }
29412
29413- memcpy(log->bios_event_log, virt, len);
29414+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29415
29416 acpi_os_unmap_memory(virt, len);
29417 return 0;
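The tpm_bios.c hunks rewrite the bounds check "(addr + sizeof(struct tcpa_event) + event->event_size) >= limit" as "event->event_size >= limit - addr - sizeof(struct tcpa_event)". event_size comes from untrusted log data, so the addition in the first form can wrap around and pass the check even though the event extends past the mapping; subtracting from the already-validated limit cannot wrap. A small stand-alone illustration with made-up numbers:

#include <stdio.h>
#include <stddef.h>

#define HDR 16u   /* stands in for sizeof(struct tcpa_event) */

/* Unsafe form: 'addr + HDR + size' may wrap past SIZE_MAX and compare low. */
static int in_bounds_overflowing(size_t addr, size_t size, size_t limit)
{
    return !(addr + HDR + size >= limit);
}

/* Safe form: 'limit - addr - HDR' cannot wrap as long as addr + HDR <= limit,
 * which the caller has already established. */
static int in_bounds_safe(size_t addr, size_t size, size_t limit)
{
    return !(size >= limit - addr - HDR);
}

int main(void)
{
    size_t addr = 0x1000, limit = 0x2000;
    size_t evil = (size_t)-1 - addr;   /* huge attacker-chosen event_size */

    printf("overflowing check says in-bounds: %d\n",
           in_bounds_overflowing(addr, evil, limit));   /* prints 1: wrapped */
    printf("safe check says in-bounds:        %d\n",
           in_bounds_safe(addr, evil, limit));          /* prints 0 */
    return 0;
}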
29418diff -urNp linux-2.6.32.48/drivers/char/tpm/tpm.c linux-2.6.32.48/drivers/char/tpm/tpm.c
29419--- linux-2.6.32.48/drivers/char/tpm/tpm.c 2011-11-08 19:02:43.000000000 -0500
29420+++ linux-2.6.32.48/drivers/char/tpm/tpm.c 2011-11-15 19:59:43.000000000 -0500
29421@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_c
29422 chip->vendor.req_complete_val)
29423 goto out_recv;
29424
29425- if ((status == chip->vendor.req_canceled)) {
29426+ if (status == chip->vendor.req_canceled) {
29427 dev_err(chip->dev, "Operation Canceled\n");
29428 rc = -ECANCELED;
29429 goto out;
29430@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *de
29431
29432 struct tpm_chip *chip = dev_get_drvdata(dev);
29433
29434+ pax_track_stack();
29435+
29436 tpm_cmd.header.in = tpm_readpubek_header;
29437 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
29438 "attempting to read the PUBEK");
29439diff -urNp linux-2.6.32.48/drivers/char/tty_io.c linux-2.6.32.48/drivers/char/tty_io.c
29440--- linux-2.6.32.48/drivers/char/tty_io.c 2011-11-08 19:02:43.000000000 -0500
29441+++ linux-2.6.32.48/drivers/char/tty_io.c 2011-11-15 19:59:43.000000000 -0500
29442@@ -1773,6 +1773,7 @@ got_driver:
29443
29444 if (IS_ERR(tty)) {
29445 mutex_unlock(&tty_mutex);
29446+ tty_driver_kref_put(driver);
29447 return PTR_ERR(tty);
29448 }
29449 }
29450@@ -2582,8 +2583,10 @@ long tty_ioctl(struct file *file, unsign
29451 return retval;
29452 }
29453
29454+EXPORT_SYMBOL(tty_ioctl);
29455+
29456 #ifdef CONFIG_COMPAT
29457-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
29458+long tty_compat_ioctl(struct file *file, unsigned int cmd,
29459 unsigned long arg)
29460 {
29461 struct inode *inode = file->f_dentry->d_inode;
29462@@ -2607,6 +2610,8 @@ static long tty_compat_ioctl(struct file
29463
29464 return retval;
29465 }
29466+
29467+EXPORT_SYMBOL(tty_compat_ioctl);
29468 #endif
29469
29470 /*
29471@@ -3052,7 +3057,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
29472
29473 void tty_default_fops(struct file_operations *fops)
29474 {
29475- *fops = tty_fops;
29476+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
29477 }
29478
29479 /*
29480diff -urNp linux-2.6.32.48/drivers/char/tty_ldisc.c linux-2.6.32.48/drivers/char/tty_ldisc.c
29481--- linux-2.6.32.48/drivers/char/tty_ldisc.c 2011-11-08 19:02:43.000000000 -0500
29482+++ linux-2.6.32.48/drivers/char/tty_ldisc.c 2011-11-15 19:59:43.000000000 -0500
29483@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
29484 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
29485 struct tty_ldisc_ops *ldo = ld->ops;
29486
29487- ldo->refcount--;
29488+ atomic_dec(&ldo->refcount);
29489 module_put(ldo->owner);
29490 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
29491
29492@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
29493 spin_lock_irqsave(&tty_ldisc_lock, flags);
29494 tty_ldiscs[disc] = new_ldisc;
29495 new_ldisc->num = disc;
29496- new_ldisc->refcount = 0;
29497+ atomic_set(&new_ldisc->refcount, 0);
29498 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
29499
29500 return ret;
29501@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
29502 return -EINVAL;
29503
29504 spin_lock_irqsave(&tty_ldisc_lock, flags);
29505- if (tty_ldiscs[disc]->refcount)
29506+ if (atomic_read(&tty_ldiscs[disc]->refcount))
29507 ret = -EBUSY;
29508 else
29509 tty_ldiscs[disc] = NULL;
29510@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
29511 if (ldops) {
29512 ret = ERR_PTR(-EAGAIN);
29513 if (try_module_get(ldops->owner)) {
29514- ldops->refcount++;
29515+ atomic_inc(&ldops->refcount);
29516 ret = ldops;
29517 }
29518 }
29519@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
29520 unsigned long flags;
29521
29522 spin_lock_irqsave(&tty_ldisc_lock, flags);
29523- ldops->refcount--;
29524+ atomic_dec(&ldops->refcount);
29525 module_put(ldops->owner);
29526 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
29527 }
29528diff -urNp linux-2.6.32.48/drivers/char/virtio_console.c linux-2.6.32.48/drivers/char/virtio_console.c
29529--- linux-2.6.32.48/drivers/char/virtio_console.c 2011-11-08 19:02:43.000000000 -0500
29530+++ linux-2.6.32.48/drivers/char/virtio_console.c 2011-11-15 19:59:43.000000000 -0500
29531@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
29532 * virtqueue, so we let the drivers do some boutique early-output thing. */
29533 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
29534 {
29535- virtio_cons.put_chars = put_chars;
29536+ pax_open_kernel();
29537+ *(void **)&virtio_cons.put_chars = put_chars;
29538+ pax_close_kernel();
29539 return hvc_instantiate(0, 0, &virtio_cons);
29540 }
29541
29542@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
29543 out_vq = vqs[1];
29544
29545 /* Start using the new console output. */
29546- virtio_cons.get_chars = get_chars;
29547- virtio_cons.put_chars = put_chars;
29548- virtio_cons.notifier_add = notifier_add_vio;
29549- virtio_cons.notifier_del = notifier_del_vio;
29550- virtio_cons.notifier_hangup = notifier_del_vio;
29551+ pax_open_kernel();
29552+ *(void **)&virtio_cons.get_chars = get_chars;
29553+ *(void **)&virtio_cons.put_chars = put_chars;
29554+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
29555+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
29556+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
29557+ pax_close_kernel();
29558
29559 /* The first argument of hvc_alloc() is the virtual console number, so
29560 * we use zero. The second argument is the parameter for the
29561diff -urNp linux-2.6.32.48/drivers/char/vt.c linux-2.6.32.48/drivers/char/vt.c
29562--- linux-2.6.32.48/drivers/char/vt.c 2011-11-08 19:02:43.000000000 -0500
29563+++ linux-2.6.32.48/drivers/char/vt.c 2011-11-15 19:59:43.000000000 -0500
29564@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
29565
29566 static void notify_write(struct vc_data *vc, unsigned int unicode)
29567 {
29568- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
29569+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
29570 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
29571 }
29572
29573diff -urNp linux-2.6.32.48/drivers/char/vt_ioctl.c linux-2.6.32.48/drivers/char/vt_ioctl.c
29574--- linux-2.6.32.48/drivers/char/vt_ioctl.c 2011-11-08 19:02:43.000000000 -0500
29575+++ linux-2.6.32.48/drivers/char/vt_ioctl.c 2011-11-15 19:59:43.000000000 -0500
29576@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
29577 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
29578 return -EFAULT;
29579
29580- if (!capable(CAP_SYS_TTY_CONFIG))
29581- perm = 0;
29582-
29583 switch (cmd) {
29584 case KDGKBENT:
29585 key_map = key_maps[s];
29586@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
29587 val = (i ? K_HOLE : K_NOSUCHMAP);
29588 return put_user(val, &user_kbe->kb_value);
29589 case KDSKBENT:
29590+ if (!capable(CAP_SYS_TTY_CONFIG))
29591+ perm = 0;
29592+
29593 if (!perm)
29594 return -EPERM;
29595+
29596 if (!i && v == K_NOSUCHMAP) {
29597 /* deallocate map */
29598 key_map = key_maps[s];
29599@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
29600 int i, j, k;
29601 int ret;
29602
29603- if (!capable(CAP_SYS_TTY_CONFIG))
29604- perm = 0;
29605-
29606 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
29607 if (!kbs) {
29608 ret = -ENOMEM;
29609@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
29610 kfree(kbs);
29611 return ((p && *p) ? -EOVERFLOW : 0);
29612 case KDSKBSENT:
29613+ if (!capable(CAP_SYS_TTY_CONFIG))
29614+ perm = 0;
29615+
29616 if (!perm) {
29617 ret = -EPERM;
29618 goto reterr;
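The vt_ioctl.c hunks move the CAP_SYS_TTY_CONFIG test out of the common path and into the KDSKBENT/KDSKBSENT (set) cases, so reading a keymap entry no longer demands the capability while modifying one still does. A condensed sketch of the resulting per-command gating; the command values and helper are placeholders for this sketch, not the real kd.h definitions:

#include <stdio.h>
#include <errno.h>

enum { KDGKBENT = 1, KDSKBENT = 2 };   /* placeholder command values */

/* Stand-in for capable(CAP_SYS_TTY_CONFIG); pretend we are unprivileged. */
static int capable_tty_config(void) { return 0; }

static int do_kdsk_ioctl(int cmd, int *map, int idx, int val)
{
    switch (cmd) {
    case KDGKBENT:
        /* Reading an entry needs no extra privilege. */
        return map[idx];
    case KDSKBENT:
        /* The privilege check now sits only on the write path. */
        if (!capable_tty_config())
            return -EPERM;
        map[idx] = val;
        return 0;
    default:
        return -EINVAL;
    }
}

int main(void)
{
    int keymap[4] = { 1, 2, 3, 4 };
    printf("get -> %d\n", do_kdsk_ioctl(KDGKBENT, keymap, 2, 0));
    printf("set -> %d (expect %d)\n",
           do_kdsk_ioctl(KDSKBENT, keymap, 2, 9), -EPERM);
    return 0;
}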
29619diff -urNp linux-2.6.32.48/drivers/cpufreq/cpufreq.c linux-2.6.32.48/drivers/cpufreq/cpufreq.c
29620--- linux-2.6.32.48/drivers/cpufreq/cpufreq.c 2011-11-08 19:02:43.000000000 -0500
29621+++ linux-2.6.32.48/drivers/cpufreq/cpufreq.c 2011-11-15 19:59:43.000000000 -0500
29622@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
29623 complete(&policy->kobj_unregister);
29624 }
29625
29626-static struct sysfs_ops sysfs_ops = {
29627+static const struct sysfs_ops sysfs_ops = {
29628 .show = show,
29629 .store = store,
29630 };
29631diff -urNp linux-2.6.32.48/drivers/cpuidle/sysfs.c linux-2.6.32.48/drivers/cpuidle/sysfs.c
29632--- linux-2.6.32.48/drivers/cpuidle/sysfs.c 2011-11-08 19:02:43.000000000 -0500
29633+++ linux-2.6.32.48/drivers/cpuidle/sysfs.c 2011-11-15 19:59:43.000000000 -0500
29634@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
29635 return ret;
29636 }
29637
29638-static struct sysfs_ops cpuidle_sysfs_ops = {
29639+static const struct sysfs_ops cpuidle_sysfs_ops = {
29640 .show = cpuidle_show,
29641 .store = cpuidle_store,
29642 };
29643@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
29644 return ret;
29645 }
29646
29647-static struct sysfs_ops cpuidle_state_sysfs_ops = {
29648+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
29649 .show = cpuidle_state_show,
29650 };
29651
29652@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
29653 .release = cpuidle_state_sysfs_release,
29654 };
29655
29656-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
29657+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
29658 {
29659 kobject_put(&device->kobjs[i]->kobj);
29660 wait_for_completion(&device->kobjs[i]->kobj_unregister);
29661diff -urNp linux-2.6.32.48/drivers/crypto/hifn_795x.c linux-2.6.32.48/drivers/crypto/hifn_795x.c
29662--- linux-2.6.32.48/drivers/crypto/hifn_795x.c 2011-11-08 19:02:43.000000000 -0500
29663+++ linux-2.6.32.48/drivers/crypto/hifn_795x.c 2011-11-15 19:59:43.000000000 -0500
29664@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
29665 0xCA, 0x34, 0x2B, 0x2E};
29666 struct scatterlist sg;
29667
29668+ pax_track_stack();
29669+
29670 memset(src, 0, sizeof(src));
29671 memset(ctx.key, 0, sizeof(ctx.key));
29672
29673diff -urNp linux-2.6.32.48/drivers/crypto/padlock-aes.c linux-2.6.32.48/drivers/crypto/padlock-aes.c
29674--- linux-2.6.32.48/drivers/crypto/padlock-aes.c 2011-11-08 19:02:43.000000000 -0500
29675+++ linux-2.6.32.48/drivers/crypto/padlock-aes.c 2011-11-15 19:59:43.000000000 -0500
29676@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
29677 struct crypto_aes_ctx gen_aes;
29678 int cpu;
29679
29680+ pax_track_stack();
29681+
29682 if (key_len % 8) {
29683 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
29684 return -EINVAL;
29685diff -urNp linux-2.6.32.48/drivers/dma/ioat/dma.c linux-2.6.32.48/drivers/dma/ioat/dma.c
29686--- linux-2.6.32.48/drivers/dma/ioat/dma.c 2011-11-08 19:02:43.000000000 -0500
29687+++ linux-2.6.32.48/drivers/dma/ioat/dma.c 2011-11-15 19:59:43.000000000 -0500
29688@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
29689 return entry->show(&chan->common, page);
29690 }
29691
29692-struct sysfs_ops ioat_sysfs_ops = {
29693+const struct sysfs_ops ioat_sysfs_ops = {
29694 .show = ioat_attr_show,
29695 };
29696
29697diff -urNp linux-2.6.32.48/drivers/dma/ioat/dma.h linux-2.6.32.48/drivers/dma/ioat/dma.h
29698--- linux-2.6.32.48/drivers/dma/ioat/dma.h 2011-11-08 19:02:43.000000000 -0500
29699+++ linux-2.6.32.48/drivers/dma/ioat/dma.h 2011-11-15 19:59:43.000000000 -0500
29700@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
29701 unsigned long *phys_complete);
29702 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
29703 void ioat_kobject_del(struct ioatdma_device *device);
29704-extern struct sysfs_ops ioat_sysfs_ops;
29705+extern const struct sysfs_ops ioat_sysfs_ops;
29706 extern struct ioat_sysfs_entry ioat_version_attr;
29707 extern struct ioat_sysfs_entry ioat_cap_attr;
29708 #endif /* IOATDMA_H */
29709diff -urNp linux-2.6.32.48/drivers/edac/edac_device_sysfs.c linux-2.6.32.48/drivers/edac/edac_device_sysfs.c
29710--- linux-2.6.32.48/drivers/edac/edac_device_sysfs.c 2011-11-08 19:02:43.000000000 -0500
29711+++ linux-2.6.32.48/drivers/edac/edac_device_sysfs.c 2011-11-15 19:59:43.000000000 -0500
29712@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
29713 }
29714
29715 /* edac_dev file operations for an 'ctl_info' */
29716-static struct sysfs_ops device_ctl_info_ops = {
29717+static const struct sysfs_ops device_ctl_info_ops = {
29718 .show = edac_dev_ctl_info_show,
29719 .store = edac_dev_ctl_info_store
29720 };
29721@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
29722 }
29723
29724 /* edac_dev file operations for an 'instance' */
29725-static struct sysfs_ops device_instance_ops = {
29726+static const struct sysfs_ops device_instance_ops = {
29727 .show = edac_dev_instance_show,
29728 .store = edac_dev_instance_store
29729 };
29730@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
29731 }
29732
29733 /* edac_dev file operations for a 'block' */
29734-static struct sysfs_ops device_block_ops = {
29735+static const struct sysfs_ops device_block_ops = {
29736 .show = edac_dev_block_show,
29737 .store = edac_dev_block_store
29738 };
29739diff -urNp linux-2.6.32.48/drivers/edac/edac_mc_sysfs.c linux-2.6.32.48/drivers/edac/edac_mc_sysfs.c
29740--- linux-2.6.32.48/drivers/edac/edac_mc_sysfs.c 2011-11-08 19:02:43.000000000 -0500
29741+++ linux-2.6.32.48/drivers/edac/edac_mc_sysfs.c 2011-11-15 19:59:43.000000000 -0500
29742@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
29743 return -EIO;
29744 }
29745
29746-static struct sysfs_ops csrowfs_ops = {
29747+static const struct sysfs_ops csrowfs_ops = {
29748 .show = csrowdev_show,
29749 .store = csrowdev_store
29750 };
29751@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
29752 }
29753
29754 /* Intermediate show/store table */
29755-static struct sysfs_ops mci_ops = {
29756+static const struct sysfs_ops mci_ops = {
29757 .show = mcidev_show,
29758 .store = mcidev_store
29759 };
29760diff -urNp linux-2.6.32.48/drivers/edac/edac_pci_sysfs.c linux-2.6.32.48/drivers/edac/edac_pci_sysfs.c
29761--- linux-2.6.32.48/drivers/edac/edac_pci_sysfs.c 2011-11-08 19:02:43.000000000 -0500
29762+++ linux-2.6.32.48/drivers/edac/edac_pci_sysfs.c 2011-11-15 19:59:43.000000000 -0500
29763@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
29764 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29765 static int edac_pci_poll_msec = 1000; /* one second workq period */
29766
29767-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29768-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29769+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29770+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29771
29772 static struct kobject *edac_pci_top_main_kobj;
29773 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29774@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
29775 }
29776
29777 /* fs_ops table */
29778-static struct sysfs_ops pci_instance_ops = {
29779+static const struct sysfs_ops pci_instance_ops = {
29780 .show = edac_pci_instance_show,
29781 .store = edac_pci_instance_store
29782 };
29783@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
29784 return -EIO;
29785 }
29786
29787-static struct sysfs_ops edac_pci_sysfs_ops = {
29788+static const struct sysfs_ops edac_pci_sysfs_ops = {
29789 .show = edac_pci_dev_show,
29790 .store = edac_pci_dev_store
29791 };
29792@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
29793 edac_printk(KERN_CRIT, EDAC_PCI,
29794 "Signaled System Error on %s\n",
29795 pci_name(dev));
29796- atomic_inc(&pci_nonparity_count);
29797+ atomic_inc_unchecked(&pci_nonparity_count);
29798 }
29799
29800 if (status & (PCI_STATUS_PARITY)) {
29801@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
29802 "Master Data Parity Error on %s\n",
29803 pci_name(dev));
29804
29805- atomic_inc(&pci_parity_count);
29806+ atomic_inc_unchecked(&pci_parity_count);
29807 }
29808
29809 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29810@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
29811 "Detected Parity Error on %s\n",
29812 pci_name(dev));
29813
29814- atomic_inc(&pci_parity_count);
29815+ atomic_inc_unchecked(&pci_parity_count);
29816 }
29817 }
29818
29819@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
29820 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29821 "Signaled System Error on %s\n",
29822 pci_name(dev));
29823- atomic_inc(&pci_nonparity_count);
29824+ atomic_inc_unchecked(&pci_nonparity_count);
29825 }
29826
29827 if (status & (PCI_STATUS_PARITY)) {
29828@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
29829 "Master Data Parity Error on "
29830 "%s\n", pci_name(dev));
29831
29832- atomic_inc(&pci_parity_count);
29833+ atomic_inc_unchecked(&pci_parity_count);
29834 }
29835
29836 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29837@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
29838 "Detected Parity Error on %s\n",
29839 pci_name(dev));
29840
29841- atomic_inc(&pci_parity_count);
29842+ atomic_inc_unchecked(&pci_parity_count);
29843 }
29844 }
29845 }
29846@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
29847 if (!check_pci_errors)
29848 return;
29849
29850- before_count = atomic_read(&pci_parity_count);
29851+ before_count = atomic_read_unchecked(&pci_parity_count);
29852
29853 /* scan all PCI devices looking for a Parity Error on devices and
29854 * bridges.
29855@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
29856 /* Only if operator has selected panic on PCI Error */
29857 if (edac_pci_get_panic_on_pe()) {
29858 /* If the count is different 'after' from 'before' */
29859- if (before_count != atomic_read(&pci_parity_count))
29860+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29861 panic("EDAC: PCI Parity Error");
29862 }
29863 }
29864diff -urNp linux-2.6.32.48/drivers/firewire/core-card.c linux-2.6.32.48/drivers/firewire/core-card.c
29865--- linux-2.6.32.48/drivers/firewire/core-card.c 2011-11-08 19:02:43.000000000 -0500
29866+++ linux-2.6.32.48/drivers/firewire/core-card.c 2011-11-15 19:59:43.000000000 -0500
29867@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
29868
29869 void fw_core_remove_card(struct fw_card *card)
29870 {
29871- struct fw_card_driver dummy_driver = dummy_driver_template;
29872+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29873
29874 card->driver->update_phy_reg(card, 4,
29875 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29876diff -urNp linux-2.6.32.48/drivers/firewire/core-cdev.c linux-2.6.32.48/drivers/firewire/core-cdev.c
29877--- linux-2.6.32.48/drivers/firewire/core-cdev.c 2011-11-08 19:02:43.000000000 -0500
29878+++ linux-2.6.32.48/drivers/firewire/core-cdev.c 2011-11-15 19:59:43.000000000 -0500
29879@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29880 int ret;
29881
29882 if ((request->channels == 0 && request->bandwidth == 0) ||
29883- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29884- request->bandwidth < 0)
29885+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29886 return -EINVAL;
29887
29888 r = kmalloc(sizeof(*r), GFP_KERNEL);
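The core-cdev.c hunk drops the "request->bandwidth < 0" test; assuming the bandwidth field is unsigned in this ABI (which is what makes the check removable), the comparison can never be true and the remaining upper-bound test against BANDWIDTH_AVAILABLE_INITIAL is the one that matters. A tiny demonstration, using an illustrative value for the limit:

#include <stdio.h>

#define BANDWIDTH_LIMIT 4915u   /* illustrative stand-in for the real constant */

int main(void)
{
    unsigned int bandwidth = 0xffffffffu;   /* "negative" bit pattern */

    /* Always false for an unsigned type; gcc with -Wextra reports
     * "comparison of unsigned expression < 0 is always false". */
    if (bandwidth < 0)
        printf("never reached\n");

    /* The meaningful test is the upper bound. */
    if (bandwidth > BANDWIDTH_LIMIT)
        printf("rejected: bandwidth %u too large\n", bandwidth);
    return 0;
}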
29889diff -urNp linux-2.6.32.48/drivers/firewire/core.h linux-2.6.32.48/drivers/firewire/core.h
29890--- linux-2.6.32.48/drivers/firewire/core.h 2011-11-08 19:02:43.000000000 -0500
29891+++ linux-2.6.32.48/drivers/firewire/core.h 2011-11-15 19:59:43.000000000 -0500
29892@@ -86,6 +86,7 @@ struct fw_card_driver {
29893
29894 int (*stop_iso)(struct fw_iso_context *ctx);
29895 };
29896+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29897
29898 void fw_card_initialize(struct fw_card *card,
29899 const struct fw_card_driver *driver, struct device *device);
29900diff -urNp linux-2.6.32.48/drivers/firewire/core-transaction.c linux-2.6.32.48/drivers/firewire/core-transaction.c
29901--- linux-2.6.32.48/drivers/firewire/core-transaction.c 2011-11-08 19:02:43.000000000 -0500
29902+++ linux-2.6.32.48/drivers/firewire/core-transaction.c 2011-11-15 19:59:43.000000000 -0500
29903@@ -36,6 +36,7 @@
29904 #include <linux/string.h>
29905 #include <linux/timer.h>
29906 #include <linux/types.h>
29907+#include <linux/sched.h>
29908
29909 #include <asm/byteorder.h>
29910
29911@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29912 struct transaction_callback_data d;
29913 struct fw_transaction t;
29914
29915+ pax_track_stack();
29916+
29917 init_completion(&d.done);
29918 d.payload = payload;
29919 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29920diff -urNp linux-2.6.32.48/drivers/firmware/dmi_scan.c linux-2.6.32.48/drivers/firmware/dmi_scan.c
29921--- linux-2.6.32.48/drivers/firmware/dmi_scan.c 2011-11-08 19:02:43.000000000 -0500
29922+++ linux-2.6.32.48/drivers/firmware/dmi_scan.c 2011-11-15 19:59:43.000000000 -0500
29923@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29924 }
29925 }
29926 else {
29927- /*
29928- * no iounmap() for that ioremap(); it would be a no-op, but
29929- * it's so early in setup that sucker gets confused into doing
29930- * what it shouldn't if we actually call it.
29931- */
29932 p = dmi_ioremap(0xF0000, 0x10000);
29933 if (p == NULL)
29934 goto error;
29935@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct
29936 if (buf == NULL)
29937 return -1;
29938
29939- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29940+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29941
29942 iounmap(buf);
29943 return 0;
29944diff -urNp linux-2.6.32.48/drivers/firmware/edd.c linux-2.6.32.48/drivers/firmware/edd.c
29945--- linux-2.6.32.48/drivers/firmware/edd.c 2011-11-08 19:02:43.000000000 -0500
29946+++ linux-2.6.32.48/drivers/firmware/edd.c 2011-11-15 19:59:43.000000000 -0500
29947@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29948 return ret;
29949 }
29950
29951-static struct sysfs_ops edd_attr_ops = {
29952+static const struct sysfs_ops edd_attr_ops = {
29953 .show = edd_attr_show,
29954 };
29955
29956diff -urNp linux-2.6.32.48/drivers/firmware/efivars.c linux-2.6.32.48/drivers/firmware/efivars.c
29957--- linux-2.6.32.48/drivers/firmware/efivars.c 2011-11-08 19:02:43.000000000 -0500
29958+++ linux-2.6.32.48/drivers/firmware/efivars.c 2011-11-15 19:59:43.000000000 -0500
29959@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29960 return ret;
29961 }
29962
29963-static struct sysfs_ops efivar_attr_ops = {
29964+static const struct sysfs_ops efivar_attr_ops = {
29965 .show = efivar_attr_show,
29966 .store = efivar_attr_store,
29967 };
29968diff -urNp linux-2.6.32.48/drivers/firmware/iscsi_ibft.c linux-2.6.32.48/drivers/firmware/iscsi_ibft.c
29969--- linux-2.6.32.48/drivers/firmware/iscsi_ibft.c 2011-11-08 19:02:43.000000000 -0500
29970+++ linux-2.6.32.48/drivers/firmware/iscsi_ibft.c 2011-11-15 19:59:43.000000000 -0500
29971@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29972 return ret;
29973 }
29974
29975-static struct sysfs_ops ibft_attr_ops = {
29976+static const struct sysfs_ops ibft_attr_ops = {
29977 .show = ibft_show_attribute,
29978 };
29979
29980diff -urNp linux-2.6.32.48/drivers/firmware/memmap.c linux-2.6.32.48/drivers/firmware/memmap.c
29981--- linux-2.6.32.48/drivers/firmware/memmap.c 2011-11-08 19:02:43.000000000 -0500
29982+++ linux-2.6.32.48/drivers/firmware/memmap.c 2011-11-15 19:59:43.000000000 -0500
29983@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29984 NULL
29985 };
29986
29987-static struct sysfs_ops memmap_attr_ops = {
29988+static const struct sysfs_ops memmap_attr_ops = {
29989 .show = memmap_attr_show,
29990 };
29991
29992diff -urNp linux-2.6.32.48/drivers/gpio/vr41xx_giu.c linux-2.6.32.48/drivers/gpio/vr41xx_giu.c
29993--- linux-2.6.32.48/drivers/gpio/vr41xx_giu.c 2011-11-08 19:02:43.000000000 -0500
29994+++ linux-2.6.32.48/drivers/gpio/vr41xx_giu.c 2011-11-15 19:59:43.000000000 -0500
29995@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29996 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29997 maskl, pendl, maskh, pendh);
29998
29999- atomic_inc(&irq_err_count);
30000+ atomic_inc_unchecked(&irq_err_count);
30001
30002 return -EINVAL;
30003 }
30004diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_crtc.c linux-2.6.32.48/drivers/gpu/drm/drm_crtc.c
30005--- linux-2.6.32.48/drivers/gpu/drm/drm_crtc.c 2011-11-08 19:02:43.000000000 -0500
30006+++ linux-2.6.32.48/drivers/gpu/drm/drm_crtc.c 2011-11-15 19:59:43.000000000 -0500
30007@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_dev
30008 */
30009 if ((out_resp->count_modes >= mode_count) && mode_count) {
30010 copied = 0;
30011- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
30012+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
30013 list_for_each_entry(mode, &connector->modes, head) {
30014 drm_crtc_convert_to_umode(&u_mode, mode);
30015 if (copy_to_user(mode_ptr + copied,
30016@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_dev
30017
30018 if ((out_resp->count_props >= props_count) && props_count) {
30019 copied = 0;
30020- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
30021- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
30022+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
30023+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
30024 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
30025 if (connector->property_ids[i] != 0) {
30026 if (put_user(connector->property_ids[i],
30027@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_dev
30028
30029 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
30030 copied = 0;
30031- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
30032+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
30033 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
30034 if (connector->encoder_ids[i] != 0) {
30035 if (put_user(connector->encoder_ids[i],
30036@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *
30037 }
30038
30039 for (i = 0; i < crtc_req->count_connectors; i++) {
30040- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
30041+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
30042 if (get_user(out_id, &set_connectors_ptr[i])) {
30043 ret = -EFAULT;
30044 goto out;
30045@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct dr
30046 out_resp->flags = property->flags;
30047
30048 if ((out_resp->count_values >= value_count) && value_count) {
30049- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
30050+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
30051 for (i = 0; i < value_count; i++) {
30052 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
30053 ret = -EFAULT;
30054@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct dr
30055 if (property->flags & DRM_MODE_PROP_ENUM) {
30056 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
30057 copied = 0;
30058- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
30059+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
30060 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
30061
30062 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
30063@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct dr
30064 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
30065 copied = 0;
30066 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
30067- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
30068+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
30069
30070 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
30071 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
30072@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_de
30073 blob = obj_to_blob(obj);
30074
30075 if (out_resp->length == blob->length) {
30076- blob_ptr = (void *)(unsigned long)out_resp->data;
30077+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
30078 if (copy_to_user(blob_ptr, blob->data, blob->length)){
30079 ret = -EFAULT;
30080 goto done;
30081diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.48/drivers/gpu/drm/drm_crtc_helper.c
30082--- linux-2.6.32.48/drivers/gpu/drm/drm_crtc_helper.c 2011-11-08 19:02:43.000000000 -0500
30083+++ linux-2.6.32.48/drivers/gpu/drm/drm_crtc_helper.c 2011-11-15 19:59:43.000000000 -0500
30084@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
30085 struct drm_crtc *tmp;
30086 int crtc_mask = 1;
30087
30088- WARN(!crtc, "checking null crtc?");
30089+ BUG_ON(!crtc);
30090
30091 dev = crtc->dev;
30092
30093@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
30094
30095 adjusted_mode = drm_mode_duplicate(dev, mode);
30096
30097+ pax_track_stack();
30098+
30099 crtc->enabled = drm_helper_crtc_in_use(crtc);
30100
30101 if (!crtc->enabled)
30102diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_drv.c linux-2.6.32.48/drivers/gpu/drm/drm_drv.c
30103--- linux-2.6.32.48/drivers/gpu/drm/drm_drv.c 2011-11-08 19:02:43.000000000 -0500
30104+++ linux-2.6.32.48/drivers/gpu/drm/drm_drv.c 2011-11-15 19:59:43.000000000 -0500
30105@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
30106 char *kdata = NULL;
30107
30108 atomic_inc(&dev->ioctl_count);
30109- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30110+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30111 ++file_priv->ioctl_count;
30112
30113 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30114diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_fops.c linux-2.6.32.48/drivers/gpu/drm/drm_fops.c
30115--- linux-2.6.32.48/drivers/gpu/drm/drm_fops.c 2011-11-08 19:02:43.000000000 -0500
30116+++ linux-2.6.32.48/drivers/gpu/drm/drm_fops.c 2011-11-15 19:59:43.000000000 -0500
30117@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
30118 }
30119
30120 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30121- atomic_set(&dev->counts[i], 0);
30122+ atomic_set_unchecked(&dev->counts[i], 0);
30123
30124 dev->sigdata.lock = NULL;
30125
30126@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
30127
30128 retcode = drm_open_helper(inode, filp, dev);
30129 if (!retcode) {
30130- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30131+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30132 spin_lock(&dev->count_lock);
30133- if (!dev->open_count++) {
30134+ if (local_inc_return(&dev->open_count) == 1) {
30135 spin_unlock(&dev->count_lock);
30136 retcode = drm_setup(dev);
30137 goto out;
30138@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
30139
30140 lock_kernel();
30141
30142- DRM_DEBUG("open_count = %d\n", dev->open_count);
30143+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
30144
30145 if (dev->driver->preclose)
30146 dev->driver->preclose(dev, file_priv);
30147@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
30148 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30149 task_pid_nr(current),
30150 (long)old_encode_dev(file_priv->minor->device),
30151- dev->open_count);
30152+ local_read(&dev->open_count));
30153
30154 /* if the master has gone away we can't do anything with the lock */
30155 if (file_priv->minor->master)
30156@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
30157 * End inline drm_release
30158 */
30159
30160- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30161+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30162 spin_lock(&dev->count_lock);
30163- if (!--dev->open_count) {
30164+ if (local_dec_and_test(&dev->open_count)) {
30165 if (atomic_read(&dev->ioctl_count)) {
30166 DRM_ERROR("Device busy: %d\n",
30167 atomic_read(&dev->ioctl_count));
30168diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_gem.c linux-2.6.32.48/drivers/gpu/drm/drm_gem.c
30169--- linux-2.6.32.48/drivers/gpu/drm/drm_gem.c 2011-11-08 19:02:43.000000000 -0500
30170+++ linux-2.6.32.48/drivers/gpu/drm/drm_gem.c 2011-11-15 19:59:43.000000000 -0500
30171@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
30172 spin_lock_init(&dev->object_name_lock);
30173 idr_init(&dev->object_name_idr);
30174 atomic_set(&dev->object_count, 0);
30175- atomic_set(&dev->object_memory, 0);
30176+ atomic_set_unchecked(&dev->object_memory, 0);
30177 atomic_set(&dev->pin_count, 0);
30178- atomic_set(&dev->pin_memory, 0);
30179+ atomic_set_unchecked(&dev->pin_memory, 0);
30180 atomic_set(&dev->gtt_count, 0);
30181- atomic_set(&dev->gtt_memory, 0);
30182+ atomic_set_unchecked(&dev->gtt_memory, 0);
30183
30184 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
30185 if (!mm) {
30186@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
30187 goto fput;
30188 }
30189 atomic_inc(&dev->object_count);
30190- atomic_add(obj->size, &dev->object_memory);
30191+ atomic_add_unchecked(obj->size, &dev->object_memory);
30192 return obj;
30193 fput:
30194 fput(obj->filp);
30195@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
30196
30197 fput(obj->filp);
30198 atomic_dec(&dev->object_count);
30199- atomic_sub(obj->size, &dev->object_memory);
30200+ atomic_sub_unchecked(obj->size, &dev->object_memory);
30201 kfree(obj);
30202 }
30203 EXPORT_SYMBOL(drm_gem_object_free);
30204diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_info.c linux-2.6.32.48/drivers/gpu/drm/drm_info.c
30205--- linux-2.6.32.48/drivers/gpu/drm/drm_info.c 2011-11-08 19:02:43.000000000 -0500
30206+++ linux-2.6.32.48/drivers/gpu/drm/drm_info.c 2011-11-15 19:59:43.000000000 -0500
30207@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
30208 struct drm_local_map *map;
30209 struct drm_map_list *r_list;
30210
30211- /* Hardcoded from _DRM_FRAME_BUFFER,
30212- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30213- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30214- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30215+ static const char * const types[] = {
30216+ [_DRM_FRAME_BUFFER] = "FB",
30217+ [_DRM_REGISTERS] = "REG",
30218+ [_DRM_SHM] = "SHM",
30219+ [_DRM_AGP] = "AGP",
30220+ [_DRM_SCATTER_GATHER] = "SG",
30221+ [_DRM_CONSISTENT] = "PCI",
30222+ [_DRM_GEM] = "GEM" };
30223 const char *type;
30224 int i;
30225
30226@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
30227 map = r_list->map;
30228 if (!map)
30229 continue;
30230- if (map->type < 0 || map->type > 5)
30231+ if (map->type >= ARRAY_SIZE(types))
30232 type = "??";
30233 else
30234 type = types[map->type];
30235@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
30236 struct drm_device *dev = node->minor->dev;
30237
30238 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
30239- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
30240+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
30241 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
30242- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
30243- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
30244+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
30245+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
30246 seq_printf(m, "%d gtt total\n", dev->gtt_total);
30247 return 0;
30248 }
30249@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
30250 mutex_lock(&dev->struct_mutex);
30251 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
30252 atomic_read(&dev->vma_count),
30253+#ifdef CONFIG_GRKERNSEC_HIDESYM
30254+ NULL, 0);
30255+#else
30256 high_memory, (u64)virt_to_phys(high_memory));
30257+#endif
30258
30259 list_for_each_entry(pt, &dev->vmalist, head) {
30260 vma = pt->vma;
30261@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
30262 continue;
30263 seq_printf(m,
30264 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
30265- pt->pid, vma->vm_start, vma->vm_end,
30266+ pt->pid,
30267+#ifdef CONFIG_GRKERNSEC_HIDESYM
30268+ 0, 0,
30269+#else
30270+ vma->vm_start, vma->vm_end,
30271+#endif
30272 vma->vm_flags & VM_READ ? 'r' : '-',
30273 vma->vm_flags & VM_WRITE ? 'w' : '-',
30274 vma->vm_flags & VM_EXEC ? 'x' : '-',
30275 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30276 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30277 vma->vm_flags & VM_IO ? 'i' : '-',
30278+#ifdef CONFIG_GRKERNSEC_HIDESYM
30279+ 0);
30280+#else
30281 vma->vm_pgoff);
30282+#endif
30283
30284 #if defined(__i386__)
30285 pgprot = pgprot_val(vma->vm_page_prot);
30286diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_ioc32.c linux-2.6.32.48/drivers/gpu/drm/drm_ioc32.c
30287--- linux-2.6.32.48/drivers/gpu/drm/drm_ioc32.c 2011-11-08 19:02:43.000000000 -0500
30288+++ linux-2.6.32.48/drivers/gpu/drm/drm_ioc32.c 2011-11-15 19:59:43.000000000 -0500
30289@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct fi
30290 request = compat_alloc_user_space(nbytes);
30291 if (!access_ok(VERIFY_WRITE, request, nbytes))
30292 return -EFAULT;
30293- list = (struct drm_buf_desc *) (request + 1);
30294+ list = (struct drm_buf_desc __user *) (request + 1);
30295
30296 if (__put_user(count, &request->count)
30297 || __put_user(list, &request->list))
30298@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct fil
30299 request = compat_alloc_user_space(nbytes);
30300 if (!access_ok(VERIFY_WRITE, request, nbytes))
30301 return -EFAULT;
30302- list = (struct drm_buf_pub *) (request + 1);
30303+ list = (struct drm_buf_pub __user *) (request + 1);
30304
30305 if (__put_user(count, &request->count)
30306 || __put_user(list, &request->list))
30307diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.48/drivers/gpu/drm/drm_ioctl.c
30308--- linux-2.6.32.48/drivers/gpu/drm/drm_ioctl.c 2011-11-08 19:02:43.000000000 -0500
30309+++ linux-2.6.32.48/drivers/gpu/drm/drm_ioctl.c 2011-11-15 19:59:43.000000000 -0500
30310@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
30311 stats->data[i].value =
30312 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30313 else
30314- stats->data[i].value = atomic_read(&dev->counts[i]);
30315+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30316 stats->data[i].type = dev->types[i];
30317 }
30318
30319diff -urNp linux-2.6.32.48/drivers/gpu/drm/drm_lock.c linux-2.6.32.48/drivers/gpu/drm/drm_lock.c
30320--- linux-2.6.32.48/drivers/gpu/drm/drm_lock.c 2011-11-08 19:02:43.000000000 -0500
30321+++ linux-2.6.32.48/drivers/gpu/drm/drm_lock.c 2011-11-15 19:59:43.000000000 -0500
30322@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
30323 if (drm_lock_take(&master->lock, lock->context)) {
30324 master->lock.file_priv = file_priv;
30325 master->lock.lock_time = jiffies;
30326- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30327+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30328 break; /* Got lock */
30329 }
30330
30331@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
30332 return -EINVAL;
30333 }
30334
30335- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30336+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30337
30338 /* kernel_context_switch isn't used by any of the x86 drm
30339 * modules but is required by the Sparc driver.
30340diff -urNp linux-2.6.32.48/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.48/drivers/gpu/drm/i810/i810_dma.c
30341--- linux-2.6.32.48/drivers/gpu/drm/i810/i810_dma.c 2011-11-08 19:02:43.000000000 -0500
30342+++ linux-2.6.32.48/drivers/gpu/drm/i810/i810_dma.c 2011-11-15 19:59:43.000000000 -0500
30343@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
30344 dma->buflist[vertex->idx],
30345 vertex->discard, vertex->used);
30346
30347- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30348- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30349+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30350+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30351 sarea_priv->last_enqueue = dev_priv->counter - 1;
30352 sarea_priv->last_dispatch = (int)hw_status[5];
30353
30354@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
30355 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30356 mc->last_render);
30357
30358- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30359- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30360+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30361+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30362 sarea_priv->last_enqueue = dev_priv->counter - 1;
30363 sarea_priv->last_dispatch = (int)hw_status[5];
30364
30365diff -urNp linux-2.6.32.48/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.48/drivers/gpu/drm/i810/i810_drv.h
30366--- linux-2.6.32.48/drivers/gpu/drm/i810/i810_drv.h 2011-11-08 19:02:43.000000000 -0500
30367+++ linux-2.6.32.48/drivers/gpu/drm/i810/i810_drv.h 2011-11-15 19:59:43.000000000 -0500
30368@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30369 int page_flipping;
30370
30371 wait_queue_head_t irq_queue;
30372- atomic_t irq_received;
30373- atomic_t irq_emitted;
30374+ atomic_unchecked_t irq_received;
30375+ atomic_unchecked_t irq_emitted;
30376
30377 int front_offset;
30378 } drm_i810_private_t;
30379diff -urNp linux-2.6.32.48/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.48/drivers/gpu/drm/i830/i830_drv.h
30380--- linux-2.6.32.48/drivers/gpu/drm/i830/i830_drv.h 2011-11-08 19:02:43.000000000 -0500
30381+++ linux-2.6.32.48/drivers/gpu/drm/i830/i830_drv.h 2011-11-15 19:59:43.000000000 -0500
30382@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
30383 int page_flipping;
30384
30385 wait_queue_head_t irq_queue;
30386- atomic_t irq_received;
30387- atomic_t irq_emitted;
30388+ atomic_unchecked_t irq_received;
30389+ atomic_unchecked_t irq_emitted;
30390
30391 int use_mi_batchbuffer_start;
30392
30393diff -urNp linux-2.6.32.48/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.48/drivers/gpu/drm/i830/i830_irq.c
30394--- linux-2.6.32.48/drivers/gpu/drm/i830/i830_irq.c 2011-11-08 19:02:43.000000000 -0500
30395+++ linux-2.6.32.48/drivers/gpu/drm/i830/i830_irq.c 2011-11-15 19:59:43.000000000 -0500
30396@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
30397
30398 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
30399
30400- atomic_inc(&dev_priv->irq_received);
30401+ atomic_inc_unchecked(&dev_priv->irq_received);
30402 wake_up_interruptible(&dev_priv->irq_queue);
30403
30404 return IRQ_HANDLED;
30405@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
30406
30407 DRM_DEBUG("%s\n", __func__);
30408
30409- atomic_inc(&dev_priv->irq_emitted);
30410+ atomic_inc_unchecked(&dev_priv->irq_emitted);
30411
30412 BEGIN_LP_RING(2);
30413 OUT_RING(0);
30414 OUT_RING(GFX_OP_USER_INTERRUPT);
30415 ADVANCE_LP_RING();
30416
30417- return atomic_read(&dev_priv->irq_emitted);
30418+ return atomic_read_unchecked(&dev_priv->irq_emitted);
30419 }
30420
30421 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
30422@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
30423
30424 DRM_DEBUG("%s\n", __func__);
30425
30426- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
30427+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
30428 return 0;
30429
30430 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
30431@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
30432
30433 for (;;) {
30434 __set_current_state(TASK_INTERRUPTIBLE);
30435- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
30436+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
30437 break;
30438 if ((signed)(end - jiffies) <= 0) {
30439 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
30440@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
30441 I830_WRITE16(I830REG_HWSTAM, 0xffff);
30442 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
30443 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
30444- atomic_set(&dev_priv->irq_received, 0);
30445- atomic_set(&dev_priv->irq_emitted, 0);
30446+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30447+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
30448 init_waitqueue_head(&dev_priv->irq_queue);
30449 }
30450
30451diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7017.c
30452--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7017.c 2011-11-08 19:02:43.000000000 -0500
30453+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7017.c 2011-11-15 19:59:43.000000000 -0500
30454@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
30455 }
30456 }
30457
30458-struct intel_dvo_dev_ops ch7017_ops = {
30459+const struct intel_dvo_dev_ops ch7017_ops = {
30460 .init = ch7017_init,
30461 .detect = ch7017_detect,
30462 .mode_valid = ch7017_mode_valid,
30463diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7xxx.c
30464--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-11-08 19:02:43.000000000 -0500
30465+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-11-15 19:59:43.000000000 -0500
30466@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
30467 }
30468 }
30469
30470-struct intel_dvo_dev_ops ch7xxx_ops = {
30471+const struct intel_dvo_dev_ops ch7xxx_ops = {
30472 .init = ch7xxx_init,
30473 .detect = ch7xxx_detect,
30474 .mode_valid = ch7xxx_mode_valid,
30475diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo.h linux-2.6.32.48/drivers/gpu/drm/i915/dvo.h
30476--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo.h 2011-11-08 19:02:43.000000000 -0500
30477+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo.h 2011-11-15 19:59:43.000000000 -0500
30478@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
30479 *
30480 * \return singly-linked list of modes or NULL if no modes found.
30481 */
30482- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
30483+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
30484
30485 /**
30486 * Clean up driver-specific bits of the output
30487 */
30488- void (*destroy) (struct intel_dvo_device *dvo);
30489+ void (* const destroy) (struct intel_dvo_device *dvo);
30490
30491 /**
30492 * Debugging hook to dump device registers to log file
30493 */
30494- void (*dump_regs)(struct intel_dvo_device *dvo);
30495+ void (* const dump_regs)(struct intel_dvo_device *dvo);
30496 };
30497
30498-extern struct intel_dvo_dev_ops sil164_ops;
30499-extern struct intel_dvo_dev_ops ch7xxx_ops;
30500-extern struct intel_dvo_dev_ops ivch_ops;
30501-extern struct intel_dvo_dev_ops tfp410_ops;
30502-extern struct intel_dvo_dev_ops ch7017_ops;
30503+extern const struct intel_dvo_dev_ops sil164_ops;
30504+extern const struct intel_dvo_dev_ops ch7xxx_ops;
30505+extern const struct intel_dvo_dev_ops ivch_ops;
30506+extern const struct intel_dvo_dev_ops tfp410_ops;
30507+extern const struct intel_dvo_dev_ops ch7017_ops;
30508
30509 #endif /* _INTEL_DVO_H */
30510diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ivch.c
30511--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ivch.c 2011-11-08 19:02:43.000000000 -0500
30512+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo_ivch.c 2011-11-15 19:59:43.000000000 -0500
30513@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
30514 }
30515 }
30516
30517-struct intel_dvo_dev_ops ivch_ops= {
30518+const struct intel_dvo_dev_ops ivch_ops= {
30519 .init = ivch_init,
30520 .dpms = ivch_dpms,
30521 .save = ivch_save,
30522diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.48/drivers/gpu/drm/i915/dvo_sil164.c
30523--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo_sil164.c 2011-11-08 19:02:43.000000000 -0500
30524+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo_sil164.c 2011-11-15 19:59:43.000000000 -0500
30525@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
30526 }
30527 }
30528
30529-struct intel_dvo_dev_ops sil164_ops = {
30530+const struct intel_dvo_dev_ops sil164_ops = {
30531 .init = sil164_init,
30532 .detect = sil164_detect,
30533 .mode_valid = sil164_mode_valid,
30534diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.48/drivers/gpu/drm/i915/dvo_tfp410.c
30535--- linux-2.6.32.48/drivers/gpu/drm/i915/dvo_tfp410.c 2011-11-08 19:02:43.000000000 -0500
30536+++ linux-2.6.32.48/drivers/gpu/drm/i915/dvo_tfp410.c 2011-11-15 19:59:43.000000000 -0500
30537@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
30538 }
30539 }
30540
30541-struct intel_dvo_dev_ops tfp410_ops = {
30542+const struct intel_dvo_dev_ops tfp410_ops = {
30543 .init = tfp410_init,
30544 .detect = tfp410_detect,
30545 .mode_valid = tfp410_mode_valid,
30546diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.48/drivers/gpu/drm/i915/i915_debugfs.c
30547--- linux-2.6.32.48/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-08 19:02:43.000000000 -0500
30548+++ linux-2.6.32.48/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-15 19:59:43.000000000 -0500
30549@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
30550 I915_READ(GTIMR));
30551 }
30552 seq_printf(m, "Interrupts received: %d\n",
30553- atomic_read(&dev_priv->irq_received));
30554+ atomic_read_unchecked(&dev_priv->irq_received));
30555 if (dev_priv->hw_status_page != NULL) {
30556 seq_printf(m, "Current sequence: %d\n",
30557 i915_get_gem_seqno(dev));
30558diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.c
30559--- linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.c 2011-11-08 19:02:43.000000000 -0500
30560+++ linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.c 2011-11-15 19:59:43.000000000 -0500
30561@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
30562 return i915_resume(dev);
30563 }
30564
30565-static struct vm_operations_struct i915_gem_vm_ops = {
30566+static const struct vm_operations_struct i915_gem_vm_ops = {
30567 .fault = i915_gem_fault,
30568 .open = drm_gem_vm_open,
30569 .close = drm_gem_vm_close,
30570diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.h
30571--- linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.h 2011-11-08 19:02:43.000000000 -0500
30572+++ linux-2.6.32.48/drivers/gpu/drm/i915/i915_drv.h 2011-11-15 19:59:43.000000000 -0500
30573@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
30574 /* display clock increase/decrease */
30575 /* pll clock increase/decrease */
30576 /* clock gating init */
30577-};
30578+} __no_const;
30579
30580 typedef struct drm_i915_private {
30581 struct drm_device *dev;
30582@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
30583 int page_flipping;
30584
30585 wait_queue_head_t irq_queue;
30586- atomic_t irq_received;
30587+ atomic_unchecked_t irq_received;
30588 /** Protects user_irq_refcount and irq_mask_reg */
30589 spinlock_t user_irq_lock;
30590 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
30591diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.48/drivers/gpu/drm/i915/i915_gem.c
30592--- linux-2.6.32.48/drivers/gpu/drm/i915/i915_gem.c 2011-11-08 19:02:43.000000000 -0500
30593+++ linux-2.6.32.48/drivers/gpu/drm/i915/i915_gem.c 2011-11-15 19:59:43.000000000 -0500
30594@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
30595
30596 args->aper_size = dev->gtt_total;
30597 args->aper_available_size = (args->aper_size -
30598- atomic_read(&dev->pin_memory));
30599+ atomic_read_unchecked(&dev->pin_memory));
30600
30601 return 0;
30602 }
30603@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
30604 return -EINVAL;
30605 }
30606
30607+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
30608+ drm_gem_object_unreference(obj);
30609+ return -EFAULT;
30610+ }
30611+
30612 if (i915_gem_object_needs_bit17_swizzle(obj)) {
30613 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
30614 } else {
30615@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
30616 return -EINVAL;
30617 }
30618
30619+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
30620+ drm_gem_object_unreference(obj);
30621+ return -EFAULT;
30622+ }
30623+
30624 /* We can only do the GTT pwrite on untiled buffers, as otherwise
30625 * it would end up going through the fenced access, and we'll get
30626 * different detiling behavior between reading and writing.
30627@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
30628
30629 if (obj_priv->gtt_space) {
30630 atomic_dec(&dev->gtt_count);
30631- atomic_sub(obj->size, &dev->gtt_memory);
30632+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
30633
30634 drm_mm_put_block(obj_priv->gtt_space);
30635 obj_priv->gtt_space = NULL;
30636@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
30637 goto search_free;
30638 }
30639 atomic_inc(&dev->gtt_count);
30640- atomic_add(obj->size, &dev->gtt_memory);
30641+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
30642
30643 /* Assert that the object is not currently in any GPU domain. As it
30644 * wasn't in the GTT, there shouldn't be any way it could have been in
30645@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
30646 "%d/%d gtt bytes\n",
30647 atomic_read(&dev->object_count),
30648 atomic_read(&dev->pin_count),
30649- atomic_read(&dev->object_memory),
30650- atomic_read(&dev->pin_memory),
30651- atomic_read(&dev->gtt_memory),
30652+ atomic_read_unchecked(&dev->object_memory),
30653+ atomic_read_unchecked(&dev->pin_memory),
30654+ atomic_read_unchecked(&dev->gtt_memory),
30655 dev->gtt_total);
30656 }
30657 goto err;
30658@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
30659 */
30660 if (obj_priv->pin_count == 1) {
30661 atomic_inc(&dev->pin_count);
30662- atomic_add(obj->size, &dev->pin_memory);
30663+ atomic_add_unchecked(obj->size, &dev->pin_memory);
30664 if (!obj_priv->active &&
30665 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
30666 !list_empty(&obj_priv->list))
30667@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
30668 list_move_tail(&obj_priv->list,
30669 &dev_priv->mm.inactive_list);
30670 atomic_dec(&dev->pin_count);
30671- atomic_sub(obj->size, &dev->pin_memory);
30672+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
30673 }
30674 i915_verify_inactive(dev, __FILE__, __LINE__);
30675 }
30676diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.48/drivers/gpu/drm/i915/i915_irq.c
30677--- linux-2.6.32.48/drivers/gpu/drm/i915/i915_irq.c 2011-11-08 19:02:43.000000000 -0500
30678+++ linux-2.6.32.48/drivers/gpu/drm/i915/i915_irq.c 2011-11-15 19:59:43.000000000 -0500
30679@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
30680 int irq_received;
30681 int ret = IRQ_NONE;
30682
30683- atomic_inc(&dev_priv->irq_received);
30684+ atomic_inc_unchecked(&dev_priv->irq_received);
30685
30686 if (IS_IGDNG(dev))
30687 return igdng_irq_handler(dev);
30688@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
30689 {
30690 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30691
30692- atomic_set(&dev_priv->irq_received, 0);
30693+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30694
30695 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30696 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30697diff -urNp linux-2.6.32.48/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.48/drivers/gpu/drm/i915/intel_sdvo.c
30698--- linux-2.6.32.48/drivers/gpu/drm/i915/intel_sdvo.c 2011-11-08 19:02:43.000000000 -0500
30699+++ linux-2.6.32.48/drivers/gpu/drm/i915/intel_sdvo.c 2011-11-15 19:59:43.000000000 -0500
30700@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
30701 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
30702
30703 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
30704- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
30705+ pax_open_kernel();
30706+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
30707+ pax_close_kernel();
30708
30709 /* Read the regs to test if we can talk to the device */
30710 for (i = 0; i < 0x40; i++) {
30711diff -urNp linux-2.6.32.48/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.48/drivers/gpu/drm/mga/mga_drv.h
30712--- linux-2.6.32.48/drivers/gpu/drm/mga/mga_drv.h 2011-11-08 19:02:43.000000000 -0500
30713+++ linux-2.6.32.48/drivers/gpu/drm/mga/mga_drv.h 2011-11-15 19:59:43.000000000 -0500
30714@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30715 u32 clear_cmd;
30716 u32 maccess;
30717
30718- atomic_t vbl_received; /**< Number of vblanks received. */
30719+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30720 wait_queue_head_t fence_queue;
30721- atomic_t last_fence_retired;
30722+ atomic_unchecked_t last_fence_retired;
30723 u32 next_fence_to_post;
30724
30725 unsigned int fb_cpp;
30726diff -urNp linux-2.6.32.48/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.48/drivers/gpu/drm/mga/mga_irq.c
30727--- linux-2.6.32.48/drivers/gpu/drm/mga/mga_irq.c 2011-11-08 19:02:43.000000000 -0500
30728+++ linux-2.6.32.48/drivers/gpu/drm/mga/mga_irq.c 2011-11-15 19:59:43.000000000 -0500
30729@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
30730 if (crtc != 0)
30731 return 0;
30732
30733- return atomic_read(&dev_priv->vbl_received);
30734+ return atomic_read_unchecked(&dev_priv->vbl_received);
30735 }
30736
30737
30738@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
30739 /* VBLANK interrupt */
30740 if (status & MGA_VLINEPEN) {
30741 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30742- atomic_inc(&dev_priv->vbl_received);
30743+ atomic_inc_unchecked(&dev_priv->vbl_received);
30744 drm_handle_vblank(dev, 0);
30745 handled = 1;
30746 }
30747@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
30748 MGA_WRITE(MGA_PRIMEND, prim_end);
30749 }
30750
30751- atomic_inc(&dev_priv->last_fence_retired);
30752+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30753 DRM_WAKEUP(&dev_priv->fence_queue);
30754 handled = 1;
30755 }
30756@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
30757 * using fences.
30758 */
30759 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30760- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30761+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30762 - *sequence) <= (1 << 23)));
30763
30764 *sequence = cur_fence;
30765diff -urNp linux-2.6.32.48/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.48/drivers/gpu/drm/r128/r128_cce.c
30766--- linux-2.6.32.48/drivers/gpu/drm/r128/r128_cce.c 2011-11-08 19:02:43.000000000 -0500
30767+++ linux-2.6.32.48/drivers/gpu/drm/r128/r128_cce.c 2011-11-15 19:59:43.000000000 -0500
30768@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
30769
30770 /* GH: Simple idle check.
30771 */
30772- atomic_set(&dev_priv->idle_count, 0);
30773+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30774
30775 /* We don't support anything other than bus-mastering ring mode,
30776 * but the ring can be in either AGP or PCI space for the ring
30777diff -urNp linux-2.6.32.48/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.48/drivers/gpu/drm/r128/r128_drv.h
30778--- linux-2.6.32.48/drivers/gpu/drm/r128/r128_drv.h 2011-11-08 19:02:43.000000000 -0500
30779+++ linux-2.6.32.48/drivers/gpu/drm/r128/r128_drv.h 2011-11-15 19:59:43.000000000 -0500
30780@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30781 int is_pci;
30782 unsigned long cce_buffers_offset;
30783
30784- atomic_t idle_count;
30785+ atomic_unchecked_t idle_count;
30786
30787 int page_flipping;
30788 int current_page;
30789 u32 crtc_offset;
30790 u32 crtc_offset_cntl;
30791
30792- atomic_t vbl_received;
30793+ atomic_unchecked_t vbl_received;
30794
30795 u32 color_fmt;
30796 unsigned int front_offset;
30797diff -urNp linux-2.6.32.48/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.48/drivers/gpu/drm/r128/r128_irq.c
30798--- linux-2.6.32.48/drivers/gpu/drm/r128/r128_irq.c 2011-11-08 19:02:43.000000000 -0500
30799+++ linux-2.6.32.48/drivers/gpu/drm/r128/r128_irq.c 2011-11-15 19:59:43.000000000 -0500
30800@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
30801 if (crtc != 0)
30802 return 0;
30803
30804- return atomic_read(&dev_priv->vbl_received);
30805+ return atomic_read_unchecked(&dev_priv->vbl_received);
30806 }
30807
30808 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30809@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
30810 /* VBLANK interrupt */
30811 if (status & R128_CRTC_VBLANK_INT) {
30812 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30813- atomic_inc(&dev_priv->vbl_received);
30814+ atomic_inc_unchecked(&dev_priv->vbl_received);
30815 drm_handle_vblank(dev, 0);
30816 return IRQ_HANDLED;
30817 }
30818diff -urNp linux-2.6.32.48/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.48/drivers/gpu/drm/r128/r128_state.c
30819--- linux-2.6.32.48/drivers/gpu/drm/r128/r128_state.c 2011-11-08 19:02:43.000000000 -0500
30820+++ linux-2.6.32.48/drivers/gpu/drm/r128/r128_state.c 2011-11-15 19:59:43.000000000 -0500
30821@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
30822
30823 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
30824 {
30825- if (atomic_read(&dev_priv->idle_count) == 0) {
30826+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
30827 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30828 } else {
30829- atomic_set(&dev_priv->idle_count, 0);
30830+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30831 }
30832 }
30833
30834diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/atom.c linux-2.6.32.48/drivers/gpu/drm/radeon/atom.c
30835--- linux-2.6.32.48/drivers/gpu/drm/radeon/atom.c 2011-11-08 19:02:43.000000000 -0500
30836+++ linux-2.6.32.48/drivers/gpu/drm/radeon/atom.c 2011-11-15 19:59:43.000000000 -0500
30837@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
30838 char name[512];
30839 int i;
30840
30841+ pax_track_stack();
30842+
30843 ctx->card = card;
30844 ctx->bios = bios;
30845
30846diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.48/drivers/gpu/drm/radeon/mkregtable.c
30847--- linux-2.6.32.48/drivers/gpu/drm/radeon/mkregtable.c 2011-11-08 19:02:43.000000000 -0500
30848+++ linux-2.6.32.48/drivers/gpu/drm/radeon/mkregtable.c 2011-11-15 19:59:43.000000000 -0500
30849@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
30850 regex_t mask_rex;
30851 regmatch_t match[4];
30852 char buf[1024];
30853- size_t end;
30854+ long end;
30855 int len;
30856 int done = 0;
30857 int r;
30858 unsigned o;
30859 struct offset *offset;
30860 char last_reg_s[10];
30861- int last_reg;
30862+ unsigned long last_reg;
30863
30864 if (regcomp
30865 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30866diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_atombios.c
30867--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-08 19:02:43.000000000 -0500
30868+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-15 19:59:43.000000000 -0500
30869@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
30870 bool linkb;
30871 struct radeon_i2c_bus_rec ddc_bus;
30872
30873+ pax_track_stack();
30874+
30875 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30876
30877 if (data_offset == 0)
30878@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
30879 }
30880 }
30881
30882-struct bios_connector {
30883+static struct bios_connector {
30884 bool valid;
30885 uint16_t line_mux;
30886 uint16_t devices;
30887 int connector_type;
30888 struct radeon_i2c_bus_rec ddc_bus;
30889-};
30890+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30891
30892 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
30893 drm_device
30894@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
30895 uint8_t dac;
30896 union atom_supported_devices *supported_devices;
30897 int i, j;
30898- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30899
30900 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30901
30902diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_display.c
30903--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_display.c 2011-11-08 19:02:43.000000000 -0500
30904+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_display.c 2011-11-15 19:59:43.000000000 -0500
30905@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
30906
30907 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
30908 error = freq - current_freq;
30909- error = error < 0 ? 0xffffffff : error;
30910+ error = (int32_t)error < 0 ? 0xffffffff : error;
30911 } else
30912 error = abs(current_freq - freq);
30913 vco_diff = abs(vco - best_vco);
30914diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_drv.h
30915--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-08 19:02:43.000000000 -0500
30916+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-15 19:59:43.000000000 -0500
30917@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
30918
30919 /* SW interrupt */
30920 wait_queue_head_t swi_queue;
30921- atomic_t swi_emitted;
30922+ atomic_unchecked_t swi_emitted;
30923 int vblank_crtc;
30924 uint32_t irq_enable_reg;
30925 uint32_t r500_disp_irq_reg;
30926diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_fence.c
30927--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-08 19:02:43.000000000 -0500
30928+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-15 19:59:43.000000000 -0500
30929@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
30930 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
30931 return 0;
30932 }
30933- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
30934+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
30935 if (!rdev->cp.ready) {
30936 /* FIXME: cp is not running assume everythings is done right
30937 * away
30938@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
30939 return r;
30940 }
30941 WREG32(rdev->fence_drv.scratch_reg, 0);
30942- atomic_set(&rdev->fence_drv.seq, 0);
30943+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
30944 INIT_LIST_HEAD(&rdev->fence_drv.created);
30945 INIT_LIST_HEAD(&rdev->fence_drv.emited);
30946 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
30947diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.48/drivers/gpu/drm/radeon/radeon.h
30948--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon.h 2011-11-08 19:02:43.000000000 -0500
30949+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon.h 2011-11-15 19:59:43.000000000 -0500
30950@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
30951 */
30952 struct radeon_fence_driver {
30953 uint32_t scratch_reg;
30954- atomic_t seq;
30955+ atomic_unchecked_t seq;
30956 uint32_t last_seq;
30957 unsigned long count_timeout;
30958 wait_queue_head_t queue;
30959@@ -640,7 +640,7 @@ struct radeon_asic {
30960 uint32_t offset, uint32_t obj_size);
30961 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
30962 void (*bandwidth_update)(struct radeon_device *rdev);
30963-};
30964+} __no_const;
30965
30966 /*
30967 * Asic structures
30968diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ioc32.c
30969--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-08 19:02:43.000000000 -0500
30970+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-15 19:59:43.000000000 -0500
30971@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
30972 request = compat_alloc_user_space(sizeof(*request));
30973 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30974 || __put_user(req32.param, &request->param)
30975- || __put_user((void __user *)(unsigned long)req32.value,
30976+ || __put_user((unsigned long)req32.value,
30977 &request->value))
30978 return -EFAULT;
30979
30980diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_irq.c
30981--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-08 19:02:43.000000000 -0500
30982+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-15 19:59:43.000000000 -0500
30983@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30984 unsigned int ret;
30985 RING_LOCALS;
30986
30987- atomic_inc(&dev_priv->swi_emitted);
30988- ret = atomic_read(&dev_priv->swi_emitted);
30989+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30990+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30991
30992 BEGIN_RING(4);
30993 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30994@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30995 drm_radeon_private_t *dev_priv =
30996 (drm_radeon_private_t *) dev->dev_private;
30997
30998- atomic_set(&dev_priv->swi_emitted, 0);
30999+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31000 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31001
31002 dev->max_vblank_count = 0x001fffff;
31003diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_state.c
31004--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_state.c 2011-11-08 19:02:43.000000000 -0500
31005+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_state.c 2011-11-15 19:59:43.000000000 -0500
31006@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
31007 {
31008 drm_radeon_private_t *dev_priv = dev->dev_private;
31009 drm_radeon_getparam_t *param = data;
31010- int value;
31011+ int value = 0;
31012
31013 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31014
31015diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ttm.c
31016--- linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-08 19:02:43.000000000 -0500
31017+++ linux-2.6.32.48/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-15 19:59:43.000000000 -0500
31018@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
31019 DRM_INFO("radeon: ttm finalized\n");
31020 }
31021
31022-static struct vm_operations_struct radeon_ttm_vm_ops;
31023-static const struct vm_operations_struct *ttm_vm_ops = NULL;
31024-
31025-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
31026-{
31027- struct ttm_buffer_object *bo;
31028- int r;
31029-
31030- bo = (struct ttm_buffer_object *)vma->vm_private_data;
31031- if (bo == NULL) {
31032- return VM_FAULT_NOPAGE;
31033- }
31034- r = ttm_vm_ops->fault(vma, vmf);
31035- return r;
31036-}
31037-
31038 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31039 {
31040 struct drm_file *file_priv;
31041 struct radeon_device *rdev;
31042- int r;
31043
31044 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
31045 return drm_mmap(filp, vma);
31046@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
31047
31048 file_priv = (struct drm_file *)filp->private_data;
31049 rdev = file_priv->minor->dev->dev_private;
31050- if (rdev == NULL) {
31051+ if (!rdev)
31052 return -EINVAL;
31053- }
31054- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
31055- if (unlikely(r != 0)) {
31056- return r;
31057- }
31058- if (unlikely(ttm_vm_ops == NULL)) {
31059- ttm_vm_ops = vma->vm_ops;
31060- radeon_ttm_vm_ops = *ttm_vm_ops;
31061- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31062- }
31063- vma->vm_ops = &radeon_ttm_vm_ops;
31064- return 0;
31065+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
31066 }
31067
31068
31069diff -urNp linux-2.6.32.48/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.48/drivers/gpu/drm/radeon/rs690.c
31070--- linux-2.6.32.48/drivers/gpu/drm/radeon/rs690.c 2011-11-08 19:02:43.000000000 -0500
31071+++ linux-2.6.32.48/drivers/gpu/drm/radeon/rs690.c 2011-11-15 19:59:43.000000000 -0500
31072@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
31073 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31074 rdev->pm.sideport_bandwidth.full)
31075 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31076- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
31077+ read_delay_latency.full = rfixed_const(800 * 1000);
31078 read_delay_latency.full = rfixed_div(read_delay_latency,
31079 rdev->pm.igp_sideport_mclk);
31080+ a.full = rfixed_const(370);
31081+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
31082 } else {
31083 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31084 rdev->pm.k8_bandwidth.full)
31085diff -urNp linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo.c
31086--- linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo.c 2011-11-08 19:02:43.000000000 -0500
31087+++ linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo.c 2011-11-15 19:59:43.000000000 -0500
31088@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
31089 NULL
31090 };
31091
31092-static struct sysfs_ops ttm_bo_global_ops = {
31093+static const struct sysfs_ops ttm_bo_global_ops = {
31094 .show = &ttm_bo_global_show
31095 };
31096
31097diff -urNp linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo_vm.c
31098--- linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-11-08 19:02:43.000000000 -0500
31099+++ linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-11-15 19:59:43.000000000 -0500
31100@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
31101 {
31102 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
31103 vma->vm_private_data;
31104- struct ttm_bo_device *bdev = bo->bdev;
31105+ struct ttm_bo_device *bdev;
31106 unsigned long bus_base;
31107 unsigned long bus_offset;
31108 unsigned long bus_size;
31109@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
31110 unsigned long address = (unsigned long)vmf->virtual_address;
31111 int retval = VM_FAULT_NOPAGE;
31112
31113+ if (!bo)
31114+ return VM_FAULT_NOPAGE;
31115+ bdev = bo->bdev;
31116+
31117 /*
31118 * Work around locking order reversal in fault / nopfn
31119 * between mmap_sem and bo_reserve: Perform a trylock operation
31120diff -urNp linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_global.c
31121--- linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_global.c 2011-11-08 19:02:43.000000000 -0500
31122+++ linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_global.c 2011-11-15 19:59:43.000000000 -0500
31123@@ -36,7 +36,7 @@
31124 struct ttm_global_item {
31125 struct mutex mutex;
31126 void *object;
31127- int refcount;
31128+ atomic_t refcount;
31129 };
31130
31131 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
31132@@ -49,7 +49,7 @@ void ttm_global_init(void)
31133 struct ttm_global_item *item = &glob[i];
31134 mutex_init(&item->mutex);
31135 item->object = NULL;
31136- item->refcount = 0;
31137+ atomic_set(&item->refcount, 0);
31138 }
31139 }
31140
31141@@ -59,7 +59,7 @@ void ttm_global_release(void)
31142 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
31143 struct ttm_global_item *item = &glob[i];
31144 BUG_ON(item->object != NULL);
31145- BUG_ON(item->refcount != 0);
31146+ BUG_ON(atomic_read(&item->refcount) != 0);
31147 }
31148 }
31149
31150@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
31151 void *object;
31152
31153 mutex_lock(&item->mutex);
31154- if (item->refcount == 0) {
31155+ if (atomic_read(&item->refcount) == 0) {
31156 item->object = kzalloc(ref->size, GFP_KERNEL);
31157 if (unlikely(item->object == NULL)) {
31158 ret = -ENOMEM;
31159@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
31160 goto out_err;
31161
31162 }
31163- ++item->refcount;
31164+ atomic_inc(&item->refcount);
31165 ref->object = item->object;
31166 object = item->object;
31167 mutex_unlock(&item->mutex);
31168@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
31169 struct ttm_global_item *item = &glob[ref->global_type];
31170
31171 mutex_lock(&item->mutex);
31172- BUG_ON(item->refcount == 0);
31173+ BUG_ON(atomic_read(&item->refcount) == 0);
31174 BUG_ON(ref->object != item->object);
31175- if (--item->refcount == 0) {
31176+ if (atomic_dec_and_test(&item->refcount)) {
31177 ref->release(ref);
31178 item->object = NULL;
31179 }
31180diff -urNp linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_memory.c
31181--- linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_memory.c 2011-11-08 19:02:43.000000000 -0500
31182+++ linux-2.6.32.48/drivers/gpu/drm/ttm/ttm_memory.c 2011-11-15 19:59:43.000000000 -0500
31183@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
31184 NULL
31185 };
31186
31187-static struct sysfs_ops ttm_mem_zone_ops = {
31188+static const struct sysfs_ops ttm_mem_zone_ops = {
31189 .show = &ttm_mem_zone_show,
31190 .store = &ttm_mem_zone_store
31191 };
31192diff -urNp linux-2.6.32.48/drivers/gpu/drm/via/via_drv.h linux-2.6.32.48/drivers/gpu/drm/via/via_drv.h
31193--- linux-2.6.32.48/drivers/gpu/drm/via/via_drv.h 2011-11-08 19:02:43.000000000 -0500
31194+++ linux-2.6.32.48/drivers/gpu/drm/via/via_drv.h 2011-11-15 19:59:43.000000000 -0500
31195@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31196 typedef uint32_t maskarray_t[5];
31197
31198 typedef struct drm_via_irq {
31199- atomic_t irq_received;
31200+ atomic_unchecked_t irq_received;
31201 uint32_t pending_mask;
31202 uint32_t enable_mask;
31203 wait_queue_head_t irq_queue;
31204@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31205 struct timeval last_vblank;
31206 int last_vblank_valid;
31207 unsigned usec_per_vblank;
31208- atomic_t vbl_received;
31209+ atomic_unchecked_t vbl_received;
31210 drm_via_state_t hc_state;
31211 char pci_buf[VIA_PCI_BUF_SIZE];
31212 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31213diff -urNp linux-2.6.32.48/drivers/gpu/drm/via/via_irq.c linux-2.6.32.48/drivers/gpu/drm/via/via_irq.c
31214--- linux-2.6.32.48/drivers/gpu/drm/via/via_irq.c 2011-11-08 19:02:43.000000000 -0500
31215+++ linux-2.6.32.48/drivers/gpu/drm/via/via_irq.c 2011-11-15 19:59:43.000000000 -0500
31216@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
31217 if (crtc != 0)
31218 return 0;
31219
31220- return atomic_read(&dev_priv->vbl_received);
31221+ return atomic_read_unchecked(&dev_priv->vbl_received);
31222 }
31223
31224 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31225@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
31226
31227 status = VIA_READ(VIA_REG_INTERRUPT);
31228 if (status & VIA_IRQ_VBLANK_PENDING) {
31229- atomic_inc(&dev_priv->vbl_received);
31230- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31231+ atomic_inc_unchecked(&dev_priv->vbl_received);
31232+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31233 do_gettimeofday(&cur_vblank);
31234 if (dev_priv->last_vblank_valid) {
31235 dev_priv->usec_per_vblank =
31236@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
31237 dev_priv->last_vblank = cur_vblank;
31238 dev_priv->last_vblank_valid = 1;
31239 }
31240- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31241+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31242 DRM_DEBUG("US per vblank is: %u\n",
31243 dev_priv->usec_per_vblank);
31244 }
31245@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
31246
31247 for (i = 0; i < dev_priv->num_irqs; ++i) {
31248 if (status & cur_irq->pending_mask) {
31249- atomic_inc(&cur_irq->irq_received);
31250+ atomic_inc_unchecked(&cur_irq->irq_received);
31251 DRM_WAKEUP(&cur_irq->irq_queue);
31252 handled = 1;
31253 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
31254@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
31255 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31256 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31257 masks[irq][4]));
31258- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31259+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31260 } else {
31261 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31262 (((cur_irq_sequence =
31263- atomic_read(&cur_irq->irq_received)) -
31264+ atomic_read_unchecked(&cur_irq->irq_received)) -
31265 *sequence) <= (1 << 23)));
31266 }
31267 *sequence = cur_irq_sequence;
31268@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
31269 }
31270
31271 for (i = 0; i < dev_priv->num_irqs; ++i) {
31272- atomic_set(&cur_irq->irq_received, 0);
31273+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31274 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31275 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31276 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31277@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
31278 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31279 case VIA_IRQ_RELATIVE:
31280 irqwait->request.sequence +=
31281- atomic_read(&cur_irq->irq_received);
31282+ atomic_read_unchecked(&cur_irq->irq_received);
31283 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31284 case VIA_IRQ_ABSOLUTE:
31285 break;
31286diff -urNp linux-2.6.32.48/drivers/hid/hid-core.c linux-2.6.32.48/drivers/hid/hid-core.c
31287--- linux-2.6.32.48/drivers/hid/hid-core.c 2011-11-08 19:02:43.000000000 -0500
31288+++ linux-2.6.32.48/drivers/hid/hid-core.c 2011-11-15 19:59:43.000000000 -0500
31289@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
31290
31291 int hid_add_device(struct hid_device *hdev)
31292 {
31293- static atomic_t id = ATOMIC_INIT(0);
31294+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31295 int ret;
31296
31297 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31298@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
31299 /* XXX hack, any other cleaner solution after the driver core
31300 * is converted to allow more than 20 bytes as the device name? */
31301 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31302- hdev->vendor, hdev->product, atomic_inc_return(&id));
31303+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31304
31305 ret = device_add(&hdev->dev);
31306 if (!ret)
31307diff -urNp linux-2.6.32.48/drivers/hid/usbhid/hiddev.c linux-2.6.32.48/drivers/hid/usbhid/hiddev.c
31308--- linux-2.6.32.48/drivers/hid/usbhid/hiddev.c 2011-11-08 19:02:43.000000000 -0500
31309+++ linux-2.6.32.48/drivers/hid/usbhid/hiddev.c 2011-11-15 19:59:43.000000000 -0500
31310@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
31311 return put_user(HID_VERSION, (int __user *)arg);
31312
31313 case HIDIOCAPPLICATION:
31314- if (arg < 0 || arg >= hid->maxapplication)
31315+ if (arg >= hid->maxapplication)
31316 return -EINVAL;
31317
31318 for (i = 0; i < hid->maxcollection; i++)
31319diff -urNp linux-2.6.32.48/drivers/hwmon/lis3lv02d.c linux-2.6.32.48/drivers/hwmon/lis3lv02d.c
31320--- linux-2.6.32.48/drivers/hwmon/lis3lv02d.c 2011-11-08 19:02:43.000000000 -0500
31321+++ linux-2.6.32.48/drivers/hwmon/lis3lv02d.c 2011-11-15 19:59:43.000000000 -0500
31322@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
31323 * the lid is closed. This leads to interrupts as soon as a little move
31324 * is done.
31325 */
31326- atomic_inc(&lis3_dev.count);
31327+ atomic_inc_unchecked(&lis3_dev.count);
31328
31329 wake_up_interruptible(&lis3_dev.misc_wait);
31330 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
31331@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
31332 if (test_and_set_bit(0, &lis3_dev.misc_opened))
31333 return -EBUSY; /* already open */
31334
31335- atomic_set(&lis3_dev.count, 0);
31336+ atomic_set_unchecked(&lis3_dev.count, 0);
31337
31338 /*
31339 * The sensor can generate interrupts for free-fall and direction
31340@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
31341 add_wait_queue(&lis3_dev.misc_wait, &wait);
31342 while (true) {
31343 set_current_state(TASK_INTERRUPTIBLE);
31344- data = atomic_xchg(&lis3_dev.count, 0);
31345+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
31346 if (data)
31347 break;
31348
31349@@ -244,7 +244,7 @@ out:
31350 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31351 {
31352 poll_wait(file, &lis3_dev.misc_wait, wait);
31353- if (atomic_read(&lis3_dev.count))
31354+ if (atomic_read_unchecked(&lis3_dev.count))
31355 return POLLIN | POLLRDNORM;
31356 return 0;
31357 }
31358diff -urNp linux-2.6.32.48/drivers/hwmon/lis3lv02d.h linux-2.6.32.48/drivers/hwmon/lis3lv02d.h
31359--- linux-2.6.32.48/drivers/hwmon/lis3lv02d.h 2011-11-08 19:02:43.000000000 -0500
31360+++ linux-2.6.32.48/drivers/hwmon/lis3lv02d.h 2011-11-15 19:59:43.000000000 -0500
31361@@ -201,7 +201,7 @@ struct lis3lv02d {
31362
31363 struct input_polled_dev *idev; /* input device */
31364 struct platform_device *pdev; /* platform device */
31365- atomic_t count; /* interrupt count after last read */
31366+ atomic_unchecked_t count; /* interrupt count after last read */
31367 int xcalib; /* calibrated null value for x */
31368 int ycalib; /* calibrated null value for y */
31369 int zcalib; /* calibrated null value for z */
31370diff -urNp linux-2.6.32.48/drivers/hwmon/sht15.c linux-2.6.32.48/drivers/hwmon/sht15.c
31371--- linux-2.6.32.48/drivers/hwmon/sht15.c 2011-11-08 19:02:43.000000000 -0500
31372+++ linux-2.6.32.48/drivers/hwmon/sht15.c 2011-11-15 19:59:43.000000000 -0500
31373@@ -112,7 +112,7 @@ struct sht15_data {
31374 int supply_uV;
31375 int supply_uV_valid;
31376 struct work_struct update_supply_work;
31377- atomic_t interrupt_handled;
31378+ atomic_unchecked_t interrupt_handled;
31379 };
31380
31381 /**
31382@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
31383 return ret;
31384
31385 gpio_direction_input(data->pdata->gpio_data);
31386- atomic_set(&data->interrupt_handled, 0);
31387+ atomic_set_unchecked(&data->interrupt_handled, 0);
31388
31389 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31390 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31391 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31392 /* Only relevant if the interrupt hasn't occured. */
31393- if (!atomic_read(&data->interrupt_handled))
31394+ if (!atomic_read_unchecked(&data->interrupt_handled))
31395 schedule_work(&data->read_work);
31396 }
31397 ret = wait_event_timeout(data->wait_queue,
31398@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
31399 struct sht15_data *data = d;
31400 /* First disable the interrupt */
31401 disable_irq_nosync(irq);
31402- atomic_inc(&data->interrupt_handled);
31403+ atomic_inc_unchecked(&data->interrupt_handled);
31404 /* Then schedule a reading work struct */
31405 if (data->flag != SHT15_READING_NOTHING)
31406 schedule_work(&data->read_work);
31407@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
31408 here as could have gone low in meantime so verify
31409 it hasn't!
31410 */
31411- atomic_set(&data->interrupt_handled, 0);
31412+ atomic_set_unchecked(&data->interrupt_handled, 0);
31413 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31414 /* If still not occured or another handler has been scheduled */
31415 if (gpio_get_value(data->pdata->gpio_data)
31416- || atomic_read(&data->interrupt_handled))
31417+ || atomic_read_unchecked(&data->interrupt_handled))
31418 return;
31419 }
31420 /* Read the data back from the device */
31421diff -urNp linux-2.6.32.48/drivers/hwmon/w83791d.c linux-2.6.32.48/drivers/hwmon/w83791d.c
31422--- linux-2.6.32.48/drivers/hwmon/w83791d.c 2011-11-08 19:02:43.000000000 -0500
31423+++ linux-2.6.32.48/drivers/hwmon/w83791d.c 2011-11-15 19:59:43.000000000 -0500
31424@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
31425 struct i2c_board_info *info);
31426 static int w83791d_remove(struct i2c_client *client);
31427
31428-static int w83791d_read(struct i2c_client *client, u8 register);
31429-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
31430+static int w83791d_read(struct i2c_client *client, u8 reg);
31431+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
31432 static struct w83791d_data *w83791d_update_device(struct device *dev);
31433
31434 #ifdef DEBUG
31435diff -urNp linux-2.6.32.48/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.48/drivers/i2c/busses/i2c-amd756-s4882.c
31436--- linux-2.6.32.48/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-08 19:02:43.000000000 -0500
31437+++ linux-2.6.32.48/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-15 19:59:43.000000000 -0500
31438@@ -43,7 +43,7 @@
31439 extern struct i2c_adapter amd756_smbus;
31440
31441 static struct i2c_adapter *s4882_adapter;
31442-static struct i2c_algorithm *s4882_algo;
31443+static i2c_algorithm_no_const *s4882_algo;
31444
31445 /* Wrapper access functions for multiplexed SMBus */
31446 static DEFINE_MUTEX(amd756_lock);
31447diff -urNp linux-2.6.32.48/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.48/drivers/i2c/busses/i2c-nforce2-s4985.c
31448--- linux-2.6.32.48/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-08 19:02:43.000000000 -0500
31449+++ linux-2.6.32.48/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-15 19:59:43.000000000 -0500
31450@@ -41,7 +41,7 @@
31451 extern struct i2c_adapter *nforce2_smbus;
31452
31453 static struct i2c_adapter *s4985_adapter;
31454-static struct i2c_algorithm *s4985_algo;
31455+static i2c_algorithm_no_const *s4985_algo;
31456
31457 /* Wrapper access functions for multiplexed SMBus */
31458 static DEFINE_MUTEX(nforce2_lock);
31459diff -urNp linux-2.6.32.48/drivers/ide/ide-cd.c linux-2.6.32.48/drivers/ide/ide-cd.c
31460--- linux-2.6.32.48/drivers/ide/ide-cd.c 2011-11-08 19:02:43.000000000 -0500
31461+++ linux-2.6.32.48/drivers/ide/ide-cd.c 2011-11-15 19:59:43.000000000 -0500
31462@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
31463 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31464 if ((unsigned long)buf & alignment
31465 || blk_rq_bytes(rq) & q->dma_pad_mask
31466- || object_is_on_stack(buf))
31467+ || object_starts_on_stack(buf))
31468 drive->dma = 0;
31469 }
31470 }
31471diff -urNp linux-2.6.32.48/drivers/ide/ide-floppy.c linux-2.6.32.48/drivers/ide/ide-floppy.c
31472--- linux-2.6.32.48/drivers/ide/ide-floppy.c 2011-11-08 19:02:43.000000000 -0500
31473+++ linux-2.6.32.48/drivers/ide/ide-floppy.c 2011-11-15 19:59:43.000000000 -0500
31474@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
31475 u8 pc_buf[256], header_len, desc_cnt;
31476 int i, rc = 1, blocks, length;
31477
31478+ pax_track_stack();
31479+
31480 ide_debug_log(IDE_DBG_FUNC, "enter");
31481
31482 drive->bios_cyl = 0;
31483diff -urNp linux-2.6.32.48/drivers/ide/setup-pci.c linux-2.6.32.48/drivers/ide/setup-pci.c
31484--- linux-2.6.32.48/drivers/ide/setup-pci.c 2011-11-08 19:02:43.000000000 -0500
31485+++ linux-2.6.32.48/drivers/ide/setup-pci.c 2011-11-15 19:59:43.000000000 -0500
31486@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
31487 int ret, i, n_ports = dev2 ? 4 : 2;
31488 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
31489
31490+ pax_track_stack();
31491+
31492 for (i = 0; i < n_ports / 2; i++) {
31493 ret = ide_setup_pci_controller(pdev[i], d, !i);
31494 if (ret < 0)
31495diff -urNp linux-2.6.32.48/drivers/ieee1394/dv1394.c linux-2.6.32.48/drivers/ieee1394/dv1394.c
31496--- linux-2.6.32.48/drivers/ieee1394/dv1394.c 2011-11-08 19:02:43.000000000 -0500
31497+++ linux-2.6.32.48/drivers/ieee1394/dv1394.c 2011-11-15 19:59:43.000000000 -0500
31498@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
31499 based upon DIF section and sequence
31500 */
31501
31502-static void inline
31503+static inline void
31504 frame_put_packet (struct frame *f, struct packet *p)
31505 {
31506 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
31507diff -urNp linux-2.6.32.48/drivers/ieee1394/hosts.c linux-2.6.32.48/drivers/ieee1394/hosts.c
31508--- linux-2.6.32.48/drivers/ieee1394/hosts.c 2011-11-08 19:02:43.000000000 -0500
31509+++ linux-2.6.32.48/drivers/ieee1394/hosts.c 2011-11-15 19:59:43.000000000 -0500
31510@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
31511 }
31512
31513 static struct hpsb_host_driver dummy_driver = {
31514+ .name = "dummy",
31515 .transmit_packet = dummy_transmit_packet,
31516 .devctl = dummy_devctl,
31517 .isoctl = dummy_isoctl
31518diff -urNp linux-2.6.32.48/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.48/drivers/ieee1394/init_ohci1394_dma.c
31519--- linux-2.6.32.48/drivers/ieee1394/init_ohci1394_dma.c 2011-11-08 19:02:43.000000000 -0500
31520+++ linux-2.6.32.48/drivers/ieee1394/init_ohci1394_dma.c 2011-11-15 19:59:43.000000000 -0500
31521@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
31522 for (func = 0; func < 8; func++) {
31523 u32 class = read_pci_config(num,slot,func,
31524 PCI_CLASS_REVISION);
31525- if ((class == 0xffffffff))
31526+ if (class == 0xffffffff)
31527 continue; /* No device at this func */
31528
31529 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
31530diff -urNp linux-2.6.32.48/drivers/ieee1394/ohci1394.c linux-2.6.32.48/drivers/ieee1394/ohci1394.c
31531--- linux-2.6.32.48/drivers/ieee1394/ohci1394.c 2011-11-08 19:02:43.000000000 -0500
31532+++ linux-2.6.32.48/drivers/ieee1394/ohci1394.c 2011-11-15 19:59:43.000000000 -0500
31533@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
31534 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
31535
31536 /* Module Parameters */
31537-static int phys_dma = 1;
31538+static int phys_dma;
31539 module_param(phys_dma, int, 0444);
31540-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
31541+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
31542
31543 static void dma_trm_tasklet(unsigned long data);
31544 static void dma_trm_reset(struct dma_trm_ctx *d);
31545diff -urNp linux-2.6.32.48/drivers/ieee1394/sbp2.c linux-2.6.32.48/drivers/ieee1394/sbp2.c
31546--- linux-2.6.32.48/drivers/ieee1394/sbp2.c 2011-11-08 19:02:43.000000000 -0500
31547+++ linux-2.6.32.48/drivers/ieee1394/sbp2.c 2011-11-15 19:59:43.000000000 -0500
31548@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
31549 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
31550 MODULE_LICENSE("GPL");
31551
31552-static int sbp2_module_init(void)
31553+static int __init sbp2_module_init(void)
31554 {
31555 int ret;
31556
31557diff -urNp linux-2.6.32.48/drivers/infiniband/core/cm.c linux-2.6.32.48/drivers/infiniband/core/cm.c
31558--- linux-2.6.32.48/drivers/infiniband/core/cm.c 2011-11-08 19:02:43.000000000 -0500
31559+++ linux-2.6.32.48/drivers/infiniband/core/cm.c 2011-11-15 19:59:43.000000000 -0500
31560@@ -112,7 +112,7 @@ static char const counter_group_names[CM
31561
31562 struct cm_counter_group {
31563 struct kobject obj;
31564- atomic_long_t counter[CM_ATTR_COUNT];
31565+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31566 };
31567
31568 struct cm_counter_attribute {
31569@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
31570 struct ib_mad_send_buf *msg = NULL;
31571 int ret;
31572
31573- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31574+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31575 counter[CM_REQ_COUNTER]);
31576
31577 /* Quick state check to discard duplicate REQs. */
31578@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
31579 if (!cm_id_priv)
31580 return;
31581
31582- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31583+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31584 counter[CM_REP_COUNTER]);
31585 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31586 if (ret)
31587@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
31588 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31589 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31590 spin_unlock_irq(&cm_id_priv->lock);
31591- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31592+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31593 counter[CM_RTU_COUNTER]);
31594 goto out;
31595 }
31596@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
31597 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31598 dreq_msg->local_comm_id);
31599 if (!cm_id_priv) {
31600- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31601+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31602 counter[CM_DREQ_COUNTER]);
31603 cm_issue_drep(work->port, work->mad_recv_wc);
31604 return -EINVAL;
31605@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
31606 case IB_CM_MRA_REP_RCVD:
31607 break;
31608 case IB_CM_TIMEWAIT:
31609- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31610+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31611 counter[CM_DREQ_COUNTER]);
31612 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31613 goto unlock;
31614@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
31615 cm_free_msg(msg);
31616 goto deref;
31617 case IB_CM_DREQ_RCVD:
31618- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31619+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31620 counter[CM_DREQ_COUNTER]);
31621 goto unlock;
31622 default:
31623@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
31624 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31625 cm_id_priv->msg, timeout)) {
31626 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31627- atomic_long_inc(&work->port->
31628+ atomic_long_inc_unchecked(&work->port->
31629 counter_group[CM_RECV_DUPLICATES].
31630 counter[CM_MRA_COUNTER]);
31631 goto out;
31632@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
31633 break;
31634 case IB_CM_MRA_REQ_RCVD:
31635 case IB_CM_MRA_REP_RCVD:
31636- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31637+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31638 counter[CM_MRA_COUNTER]);
31639 /* fall through */
31640 default:
31641@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
31642 case IB_CM_LAP_IDLE:
31643 break;
31644 case IB_CM_MRA_LAP_SENT:
31645- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31646+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31647 counter[CM_LAP_COUNTER]);
31648 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31649 goto unlock;
31650@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
31651 cm_free_msg(msg);
31652 goto deref;
31653 case IB_CM_LAP_RCVD:
31654- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31655+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31656 counter[CM_LAP_COUNTER]);
31657 goto unlock;
31658 default:
31659@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
31660 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31661 if (cur_cm_id_priv) {
31662 spin_unlock_irq(&cm.lock);
31663- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31664+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31665 counter[CM_SIDR_REQ_COUNTER]);
31666 goto out; /* Duplicate message. */
31667 }
31668@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
31669 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31670 msg->retries = 1;
31671
31672- atomic_long_add(1 + msg->retries,
31673+ atomic_long_add_unchecked(1 + msg->retries,
31674 &port->counter_group[CM_XMIT].counter[attr_index]);
31675 if (msg->retries)
31676- atomic_long_add(msg->retries,
31677+ atomic_long_add_unchecked(msg->retries,
31678 &port->counter_group[CM_XMIT_RETRIES].
31679 counter[attr_index]);
31680
31681@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
31682 }
31683
31684 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31685- atomic_long_inc(&port->counter_group[CM_RECV].
31686+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31687 counter[attr_id - CM_ATTR_ID_OFFSET]);
31688
31689 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31690@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
31691 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31692
31693 return sprintf(buf, "%ld\n",
31694- atomic_long_read(&group->counter[cm_attr->index]));
31695+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31696 }
31697
31698-static struct sysfs_ops cm_counter_ops = {
31699+static const struct sysfs_ops cm_counter_ops = {
31700 .show = cm_show_counter
31701 };
31702
31703diff -urNp linux-2.6.32.48/drivers/infiniband/core/fmr_pool.c linux-2.6.32.48/drivers/infiniband/core/fmr_pool.c
31704--- linux-2.6.32.48/drivers/infiniband/core/fmr_pool.c 2011-11-08 19:02:43.000000000 -0500
31705+++ linux-2.6.32.48/drivers/infiniband/core/fmr_pool.c 2011-11-15 19:59:43.000000000 -0500
31706@@ -97,8 +97,8 @@ struct ib_fmr_pool {
31707
31708 struct task_struct *thread;
31709
31710- atomic_t req_ser;
31711- atomic_t flush_ser;
31712+ atomic_unchecked_t req_ser;
31713+ atomic_unchecked_t flush_ser;
31714
31715 wait_queue_head_t force_wait;
31716 };
31717@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
31718 struct ib_fmr_pool *pool = pool_ptr;
31719
31720 do {
31721- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31722+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31723 ib_fmr_batch_release(pool);
31724
31725- atomic_inc(&pool->flush_ser);
31726+ atomic_inc_unchecked(&pool->flush_ser);
31727 wake_up_interruptible(&pool->force_wait);
31728
31729 if (pool->flush_function)
31730@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
31731 }
31732
31733 set_current_state(TASK_INTERRUPTIBLE);
31734- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31735+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31736 !kthread_should_stop())
31737 schedule();
31738 __set_current_state(TASK_RUNNING);
31739@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
31740 pool->dirty_watermark = params->dirty_watermark;
31741 pool->dirty_len = 0;
31742 spin_lock_init(&pool->pool_lock);
31743- atomic_set(&pool->req_ser, 0);
31744- atomic_set(&pool->flush_ser, 0);
31745+ atomic_set_unchecked(&pool->req_ser, 0);
31746+ atomic_set_unchecked(&pool->flush_ser, 0);
31747 init_waitqueue_head(&pool->force_wait);
31748
31749 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31750@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
31751 }
31752 spin_unlock_irq(&pool->pool_lock);
31753
31754- serial = atomic_inc_return(&pool->req_ser);
31755+ serial = atomic_inc_return_unchecked(&pool->req_ser);
31756 wake_up_process(pool->thread);
31757
31758 if (wait_event_interruptible(pool->force_wait,
31759- atomic_read(&pool->flush_ser) - serial >= 0))
31760+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31761 return -EINTR;
31762
31763 return 0;
31764@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
31765 } else {
31766 list_add_tail(&fmr->list, &pool->dirty_list);
31767 if (++pool->dirty_len >= pool->dirty_watermark) {
31768- atomic_inc(&pool->req_ser);
31769+ atomic_inc_unchecked(&pool->req_ser);
31770 wake_up_process(pool->thread);
31771 }
31772 }
31773diff -urNp linux-2.6.32.48/drivers/infiniband/core/sysfs.c linux-2.6.32.48/drivers/infiniband/core/sysfs.c
31774--- linux-2.6.32.48/drivers/infiniband/core/sysfs.c 2011-11-08 19:02:43.000000000 -0500
31775+++ linux-2.6.32.48/drivers/infiniband/core/sysfs.c 2011-11-15 19:59:43.000000000 -0500
31776@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
31777 return port_attr->show(p, port_attr, buf);
31778 }
31779
31780-static struct sysfs_ops port_sysfs_ops = {
31781+static const struct sysfs_ops port_sysfs_ops = {
31782 .show = port_attr_show
31783 };
31784
31785diff -urNp linux-2.6.32.48/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.48/drivers/infiniband/core/uverbs_marshall.c
31786--- linux-2.6.32.48/drivers/infiniband/core/uverbs_marshall.c 2011-11-08 19:02:43.000000000 -0500
31787+++ linux-2.6.32.48/drivers/infiniband/core/uverbs_marshall.c 2011-11-15 19:59:43.000000000 -0500
31788@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
31789 dst->grh.sgid_index = src->grh.sgid_index;
31790 dst->grh.hop_limit = src->grh.hop_limit;
31791 dst->grh.traffic_class = src->grh.traffic_class;
31792+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
31793 dst->dlid = src->dlid;
31794 dst->sl = src->sl;
31795 dst->src_path_bits = src->src_path_bits;
31796 dst->static_rate = src->static_rate;
31797 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
31798 dst->port_num = src->port_num;
31799+ dst->reserved = 0;
31800 }
31801 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
31802
31803 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
31804 struct ib_qp_attr *src)
31805 {
31806+ dst->qp_state = src->qp_state;
31807 dst->cur_qp_state = src->cur_qp_state;
31808 dst->path_mtu = src->path_mtu;
31809 dst->path_mig_state = src->path_mig_state;
31810@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
31811 dst->rnr_retry = src->rnr_retry;
31812 dst->alt_port_num = src->alt_port_num;
31813 dst->alt_timeout = src->alt_timeout;
31814+ memset(dst->reserved, 0, sizeof(dst->reserved));
31815 }
31816 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
31817
31818diff -urNp linux-2.6.32.48/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.48/drivers/infiniband/hw/ipath/ipath_fs.c
31819--- linux-2.6.32.48/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-08 19:02:43.000000000 -0500
31820+++ linux-2.6.32.48/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-15 19:59:43.000000000 -0500
31821@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
31822 struct infinipath_counters counters;
31823 struct ipath_devdata *dd;
31824
31825+ pax_track_stack();
31826+
31827 dd = file->f_path.dentry->d_inode->i_private;
31828 dd->ipath_f_read_counters(dd, &counters);
31829
31830diff -urNp linux-2.6.32.48/drivers/infiniband/hw/nes/nes.c linux-2.6.32.48/drivers/infiniband/hw/nes/nes.c
31831--- linux-2.6.32.48/drivers/infiniband/hw/nes/nes.c 2011-11-08 19:02:43.000000000 -0500
31832+++ linux-2.6.32.48/drivers/infiniband/hw/nes/nes.c 2011-11-15 19:59:43.000000000 -0500
31833@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
31834 LIST_HEAD(nes_adapter_list);
31835 static LIST_HEAD(nes_dev_list);
31836
31837-atomic_t qps_destroyed;
31838+atomic_unchecked_t qps_destroyed;
31839
31840 static unsigned int ee_flsh_adapter;
31841 static unsigned int sysfs_nonidx_addr;
31842@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
31843 struct nes_adapter *nesadapter = nesdev->nesadapter;
31844 u32 qp_id;
31845
31846- atomic_inc(&qps_destroyed);
31847+ atomic_inc_unchecked(&qps_destroyed);
31848
31849 /* Free the control structures */
31850
31851diff -urNp linux-2.6.32.48/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.48/drivers/infiniband/hw/nes/nes_cm.c
31852--- linux-2.6.32.48/drivers/infiniband/hw/nes/nes_cm.c 2011-11-08 19:02:43.000000000 -0500
31853+++ linux-2.6.32.48/drivers/infiniband/hw/nes/nes_cm.c 2011-11-15 19:59:43.000000000 -0500
31854@@ -69,11 +69,11 @@ u32 cm_packets_received;
31855 u32 cm_listens_created;
31856 u32 cm_listens_destroyed;
31857 u32 cm_backlog_drops;
31858-atomic_t cm_loopbacks;
31859-atomic_t cm_nodes_created;
31860-atomic_t cm_nodes_destroyed;
31861-atomic_t cm_accel_dropped_pkts;
31862-atomic_t cm_resets_recvd;
31863+atomic_unchecked_t cm_loopbacks;
31864+atomic_unchecked_t cm_nodes_created;
31865+atomic_unchecked_t cm_nodes_destroyed;
31866+atomic_unchecked_t cm_accel_dropped_pkts;
31867+atomic_unchecked_t cm_resets_recvd;
31868
31869 static inline int mini_cm_accelerated(struct nes_cm_core *,
31870 struct nes_cm_node *);
31871@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
31872
31873 static struct nes_cm_core *g_cm_core;
31874
31875-atomic_t cm_connects;
31876-atomic_t cm_accepts;
31877-atomic_t cm_disconnects;
31878-atomic_t cm_closes;
31879-atomic_t cm_connecteds;
31880-atomic_t cm_connect_reqs;
31881-atomic_t cm_rejects;
31882+atomic_unchecked_t cm_connects;
31883+atomic_unchecked_t cm_accepts;
31884+atomic_unchecked_t cm_disconnects;
31885+atomic_unchecked_t cm_closes;
31886+atomic_unchecked_t cm_connecteds;
31887+atomic_unchecked_t cm_connect_reqs;
31888+atomic_unchecked_t cm_rejects;
31889
31890
31891 /**
31892@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
31893 cm_node->rem_mac);
31894
31895 add_hte_node(cm_core, cm_node);
31896- atomic_inc(&cm_nodes_created);
31897+ atomic_inc_unchecked(&cm_nodes_created);
31898
31899 return cm_node;
31900 }
31901@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
31902 }
31903
31904 atomic_dec(&cm_core->node_cnt);
31905- atomic_inc(&cm_nodes_destroyed);
31906+ atomic_inc_unchecked(&cm_nodes_destroyed);
31907 nesqp = cm_node->nesqp;
31908 if (nesqp) {
31909 nesqp->cm_node = NULL;
31910@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
31911
31912 static void drop_packet(struct sk_buff *skb)
31913 {
31914- atomic_inc(&cm_accel_dropped_pkts);
31915+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31916 dev_kfree_skb_any(skb);
31917 }
31918
31919@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
31920
31921 int reset = 0; /* whether to send reset in case of err.. */
31922 int passive_state;
31923- atomic_inc(&cm_resets_recvd);
31924+ atomic_inc_unchecked(&cm_resets_recvd);
31925 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31926 " refcnt=%d\n", cm_node, cm_node->state,
31927 atomic_read(&cm_node->ref_count));
31928@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
31929 rem_ref_cm_node(cm_node->cm_core, cm_node);
31930 return NULL;
31931 }
31932- atomic_inc(&cm_loopbacks);
31933+ atomic_inc_unchecked(&cm_loopbacks);
31934 loopbackremotenode->loopbackpartner = cm_node;
31935 loopbackremotenode->tcp_cntxt.rcv_wscale =
31936 NES_CM_DEFAULT_RCV_WND_SCALE;
31937@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
31938 add_ref_cm_node(cm_node);
31939 } else if (cm_node->state == NES_CM_STATE_TSA) {
31940 rem_ref_cm_node(cm_core, cm_node);
31941- atomic_inc(&cm_accel_dropped_pkts);
31942+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31943 dev_kfree_skb_any(skb);
31944 break;
31945 }
31946@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
31947
31948 if ((cm_id) && (cm_id->event_handler)) {
31949 if (issue_disconn) {
31950- atomic_inc(&cm_disconnects);
31951+ atomic_inc_unchecked(&cm_disconnects);
31952 cm_event.event = IW_CM_EVENT_DISCONNECT;
31953 cm_event.status = disconn_status;
31954 cm_event.local_addr = cm_id->local_addr;
31955@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
31956 }
31957
31958 if (issue_close) {
31959- atomic_inc(&cm_closes);
31960+ atomic_inc_unchecked(&cm_closes);
31961 nes_disconnect(nesqp, 1);
31962
31963 cm_id->provider_data = nesqp;
31964@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
31965
31966 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31967 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31968- atomic_inc(&cm_accepts);
31969+ atomic_inc_unchecked(&cm_accepts);
31970
31971 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31972 atomic_read(&nesvnic->netdev->refcnt));
31973@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
31974
31975 struct nes_cm_core *cm_core;
31976
31977- atomic_inc(&cm_rejects);
31978+ atomic_inc_unchecked(&cm_rejects);
31979 cm_node = (struct nes_cm_node *) cm_id->provider_data;
31980 loopback = cm_node->loopbackpartner;
31981 cm_core = cm_node->cm_core;
31982@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31983 ntohl(cm_id->local_addr.sin_addr.s_addr),
31984 ntohs(cm_id->local_addr.sin_port));
31985
31986- atomic_inc(&cm_connects);
31987+ atomic_inc_unchecked(&cm_connects);
31988 nesqp->active_conn = 1;
31989
31990 /* cache the cm_id in the qp */
31991@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31992 if (nesqp->destroyed) {
31993 return;
31994 }
31995- atomic_inc(&cm_connecteds);
31996+ atomic_inc_unchecked(&cm_connecteds);
31997 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31998 " local port 0x%04X. jiffies = %lu.\n",
31999 nesqp->hwqp.qp_id,
32000@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
32001
32002 ret = cm_id->event_handler(cm_id, &cm_event);
32003 cm_id->add_ref(cm_id);
32004- atomic_inc(&cm_closes);
32005+ atomic_inc_unchecked(&cm_closes);
32006 cm_event.event = IW_CM_EVENT_CLOSE;
32007 cm_event.status = IW_CM_EVENT_STATUS_OK;
32008 cm_event.provider_data = cm_id->provider_data;
32009@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
32010 return;
32011 cm_id = cm_node->cm_id;
32012
32013- atomic_inc(&cm_connect_reqs);
32014+ atomic_inc_unchecked(&cm_connect_reqs);
32015 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32016 cm_node, cm_id, jiffies);
32017
32018@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
32019 return;
32020 cm_id = cm_node->cm_id;
32021
32022- atomic_inc(&cm_connect_reqs);
32023+ atomic_inc_unchecked(&cm_connect_reqs);
32024 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32025 cm_node, cm_id, jiffies);
32026
32027diff -urNp linux-2.6.32.48/drivers/infiniband/hw/nes/nes.h linux-2.6.32.48/drivers/infiniband/hw/nes/nes.h
32028--- linux-2.6.32.48/drivers/infiniband/hw/nes/nes.h 2011-11-08 19:02:43.000000000 -0500
32029+++ linux-2.6.32.48/drivers/infiniband/hw/nes/nes.h 2011-11-15 19:59:43.000000000 -0500
32030@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
32031 extern unsigned int wqm_quanta;
32032 extern struct list_head nes_adapter_list;
32033
32034-extern atomic_t cm_connects;
32035-extern atomic_t cm_accepts;
32036-extern atomic_t cm_disconnects;
32037-extern atomic_t cm_closes;
32038-extern atomic_t cm_connecteds;
32039-extern atomic_t cm_connect_reqs;
32040-extern atomic_t cm_rejects;
32041-extern atomic_t mod_qp_timouts;
32042-extern atomic_t qps_created;
32043-extern atomic_t qps_destroyed;
32044-extern atomic_t sw_qps_destroyed;
32045+extern atomic_unchecked_t cm_connects;
32046+extern atomic_unchecked_t cm_accepts;
32047+extern atomic_unchecked_t cm_disconnects;
32048+extern atomic_unchecked_t cm_closes;
32049+extern atomic_unchecked_t cm_connecteds;
32050+extern atomic_unchecked_t cm_connect_reqs;
32051+extern atomic_unchecked_t cm_rejects;
32052+extern atomic_unchecked_t mod_qp_timouts;
32053+extern atomic_unchecked_t qps_created;
32054+extern atomic_unchecked_t qps_destroyed;
32055+extern atomic_unchecked_t sw_qps_destroyed;
32056 extern u32 mh_detected;
32057 extern u32 mh_pauses_sent;
32058 extern u32 cm_packets_sent;
32059@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
32060 extern u32 cm_listens_created;
32061 extern u32 cm_listens_destroyed;
32062 extern u32 cm_backlog_drops;
32063-extern atomic_t cm_loopbacks;
32064-extern atomic_t cm_nodes_created;
32065-extern atomic_t cm_nodes_destroyed;
32066-extern atomic_t cm_accel_dropped_pkts;
32067-extern atomic_t cm_resets_recvd;
32068+extern atomic_unchecked_t cm_loopbacks;
32069+extern atomic_unchecked_t cm_nodes_created;
32070+extern atomic_unchecked_t cm_nodes_destroyed;
32071+extern atomic_unchecked_t cm_accel_dropped_pkts;
32072+extern atomic_unchecked_t cm_resets_recvd;
32073
32074 extern u32 int_mod_timer_init;
32075 extern u32 int_mod_cq_depth_256;
32076diff -urNp linux-2.6.32.48/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.48/drivers/infiniband/hw/nes/nes_nic.c
32077--- linux-2.6.32.48/drivers/infiniband/hw/nes/nes_nic.c 2011-11-08 19:02:43.000000000 -0500
32078+++ linux-2.6.32.48/drivers/infiniband/hw/nes/nes_nic.c 2011-11-15 19:59:43.000000000 -0500
32079@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
32080 target_stat_values[++index] = mh_detected;
32081 target_stat_values[++index] = mh_pauses_sent;
32082 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32083- target_stat_values[++index] = atomic_read(&cm_connects);
32084- target_stat_values[++index] = atomic_read(&cm_accepts);
32085- target_stat_values[++index] = atomic_read(&cm_disconnects);
32086- target_stat_values[++index] = atomic_read(&cm_connecteds);
32087- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32088- target_stat_values[++index] = atomic_read(&cm_rejects);
32089- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32090- target_stat_values[++index] = atomic_read(&qps_created);
32091- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32092- target_stat_values[++index] = atomic_read(&qps_destroyed);
32093- target_stat_values[++index] = atomic_read(&cm_closes);
32094+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32095+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32096+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32097+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32098+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32099+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32100+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32101+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32102+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32103+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32104+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32105 target_stat_values[++index] = cm_packets_sent;
32106 target_stat_values[++index] = cm_packets_bounced;
32107 target_stat_values[++index] = cm_packets_created;
32108@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
32109 target_stat_values[++index] = cm_listens_created;
32110 target_stat_values[++index] = cm_listens_destroyed;
32111 target_stat_values[++index] = cm_backlog_drops;
32112- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32113- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32114- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32115- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32116- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32117+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32118+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32119+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32120+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32121+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32122 target_stat_values[++index] = int_mod_timer_init;
32123 target_stat_values[++index] = int_mod_cq_depth_1;
32124 target_stat_values[++index] = int_mod_cq_depth_4;
32125diff -urNp linux-2.6.32.48/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.48/drivers/infiniband/hw/nes/nes_verbs.c
32126--- linux-2.6.32.48/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-08 19:02:43.000000000 -0500
32127+++ linux-2.6.32.48/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-15 19:59:43.000000000 -0500
32128@@ -45,9 +45,9 @@
32129
32130 #include <rdma/ib_umem.h>
32131
32132-atomic_t mod_qp_timouts;
32133-atomic_t qps_created;
32134-atomic_t sw_qps_destroyed;
32135+atomic_unchecked_t mod_qp_timouts;
32136+atomic_unchecked_t qps_created;
32137+atomic_unchecked_t sw_qps_destroyed;
32138
32139 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32140
32141@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
32142 if (init_attr->create_flags)
32143 return ERR_PTR(-EINVAL);
32144
32145- atomic_inc(&qps_created);
32146+ atomic_inc_unchecked(&qps_created);
32147 switch (init_attr->qp_type) {
32148 case IB_QPT_RC:
32149 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32150@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
32151 struct iw_cm_event cm_event;
32152 int ret;
32153
32154- atomic_inc(&sw_qps_destroyed);
32155+ atomic_inc_unchecked(&sw_qps_destroyed);
32156 nesqp->destroyed = 1;
32157
32158 /* Blow away the connection if it exists. */
32159diff -urNp linux-2.6.32.48/drivers/input/gameport/gameport.c linux-2.6.32.48/drivers/input/gameport/gameport.c
32160--- linux-2.6.32.48/drivers/input/gameport/gameport.c 2011-11-08 19:02:43.000000000 -0500
32161+++ linux-2.6.32.48/drivers/input/gameport/gameport.c 2011-11-15 19:59:43.000000000 -0500
32162@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
32163 */
32164 static void gameport_init_port(struct gameport *gameport)
32165 {
32166- static atomic_t gameport_no = ATOMIC_INIT(0);
32167+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32168
32169 __module_get(THIS_MODULE);
32170
32171 mutex_init(&gameport->drv_mutex);
32172 device_initialize(&gameport->dev);
32173- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
32174+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32175 gameport->dev.bus = &gameport_bus;
32176 gameport->dev.release = gameport_release_port;
32177 if (gameport->parent)
32178diff -urNp linux-2.6.32.48/drivers/input/input.c linux-2.6.32.48/drivers/input/input.c
32179--- linux-2.6.32.48/drivers/input/input.c 2011-11-08 19:02:43.000000000 -0500
32180+++ linux-2.6.32.48/drivers/input/input.c 2011-11-15 19:59:43.000000000 -0500
32181@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
32182 */
32183 int input_register_device(struct input_dev *dev)
32184 {
32185- static atomic_t input_no = ATOMIC_INIT(0);
32186+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32187 struct input_handler *handler;
32188 const char *path;
32189 int error;
32190@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
32191 dev->setkeycode = input_default_setkeycode;
32192
32193 dev_set_name(&dev->dev, "input%ld",
32194- (unsigned long) atomic_inc_return(&input_no) - 1);
32195+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32196
32197 error = device_add(&dev->dev);
32198 if (error)
32199diff -urNp linux-2.6.32.48/drivers/input/joystick/sidewinder.c linux-2.6.32.48/drivers/input/joystick/sidewinder.c
32200--- linux-2.6.32.48/drivers/input/joystick/sidewinder.c 2011-11-08 19:02:43.000000000 -0500
32201+++ linux-2.6.32.48/drivers/input/joystick/sidewinder.c 2011-11-15 19:59:43.000000000 -0500
32202@@ -30,6 +30,7 @@
32203 #include <linux/kernel.h>
32204 #include <linux/module.h>
32205 #include <linux/slab.h>
32206+#include <linux/sched.h>
32207 #include <linux/init.h>
32208 #include <linux/input.h>
32209 #include <linux/gameport.h>
32210@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
32211 unsigned char buf[SW_LENGTH];
32212 int i;
32213
32214+ pax_track_stack();
32215+
32216 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
32217
32218 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
32219diff -urNp linux-2.6.32.48/drivers/input/joystick/xpad.c linux-2.6.32.48/drivers/input/joystick/xpad.c
32220--- linux-2.6.32.48/drivers/input/joystick/xpad.c 2011-11-08 19:02:43.000000000 -0500
32221+++ linux-2.6.32.48/drivers/input/joystick/xpad.c 2011-11-15 19:59:43.000000000 -0500
32222@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
32223
32224 static int xpad_led_probe(struct usb_xpad *xpad)
32225 {
32226- static atomic_t led_seq = ATOMIC_INIT(0);
32227+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32228 long led_no;
32229 struct xpad_led *led;
32230 struct led_classdev *led_cdev;
32231@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
32232 if (!led)
32233 return -ENOMEM;
32234
32235- led_no = (long)atomic_inc_return(&led_seq) - 1;
32236+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32237
32238 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32239 led->xpad = xpad;
32240diff -urNp linux-2.6.32.48/drivers/input/serio/serio.c linux-2.6.32.48/drivers/input/serio/serio.c
32241--- linux-2.6.32.48/drivers/input/serio/serio.c 2011-11-08 19:02:43.000000000 -0500
32242+++ linux-2.6.32.48/drivers/input/serio/serio.c 2011-11-15 19:59:43.000000000 -0500
32243@@ -527,7 +527,7 @@ static void serio_release_port(struct de
32244 */
32245 static void serio_init_port(struct serio *serio)
32246 {
32247- static atomic_t serio_no = ATOMIC_INIT(0);
32248+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32249
32250 __module_get(THIS_MODULE);
32251
32252@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
32253 mutex_init(&serio->drv_mutex);
32254 device_initialize(&serio->dev);
32255 dev_set_name(&serio->dev, "serio%ld",
32256- (long)atomic_inc_return(&serio_no) - 1);
32257+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32258 serio->dev.bus = &serio_bus;
32259 serio->dev.release = serio_release_port;
32260 if (serio->parent) {
32261diff -urNp linux-2.6.32.48/drivers/isdn/gigaset/common.c linux-2.6.32.48/drivers/isdn/gigaset/common.c
32262--- linux-2.6.32.48/drivers/isdn/gigaset/common.c 2011-11-08 19:02:43.000000000 -0500
32263+++ linux-2.6.32.48/drivers/isdn/gigaset/common.c 2011-11-15 19:59:43.000000000 -0500
32264@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
32265 cs->commands_pending = 0;
32266 cs->cur_at_seq = 0;
32267 cs->gotfwver = -1;
32268- cs->open_count = 0;
32269+ local_set(&cs->open_count, 0);
32270 cs->dev = NULL;
32271 cs->tty = NULL;
32272 cs->tty_dev = NULL;
32273diff -urNp linux-2.6.32.48/drivers/isdn/gigaset/gigaset.h linux-2.6.32.48/drivers/isdn/gigaset/gigaset.h
32274--- linux-2.6.32.48/drivers/isdn/gigaset/gigaset.h 2011-11-08 19:02:43.000000000 -0500
32275+++ linux-2.6.32.48/drivers/isdn/gigaset/gigaset.h 2011-11-15 19:59:43.000000000 -0500
32276@@ -34,6 +34,7 @@
32277 #include <linux/tty_driver.h>
32278 #include <linux/list.h>
32279 #include <asm/atomic.h>
32280+#include <asm/local.h>
32281
32282 #define GIG_VERSION {0,5,0,0}
32283 #define GIG_COMPAT {0,4,0,0}
32284@@ -446,7 +447,7 @@ struct cardstate {
32285 spinlock_t cmdlock;
32286 unsigned curlen, cmdbytes;
32287
32288- unsigned open_count;
32289+ local_t open_count;
32290 struct tty_struct *tty;
32291 struct tasklet_struct if_wake_tasklet;
32292 unsigned control_state;
32293diff -urNp linux-2.6.32.48/drivers/isdn/gigaset/interface.c linux-2.6.32.48/drivers/isdn/gigaset/interface.c
32294--- linux-2.6.32.48/drivers/isdn/gigaset/interface.c 2011-11-08 19:02:43.000000000 -0500
32295+++ linux-2.6.32.48/drivers/isdn/gigaset/interface.c 2011-11-15 19:59:43.000000000 -0500
32296@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
32297 return -ERESTARTSYS; // FIXME -EINTR?
32298 tty->driver_data = cs;
32299
32300- ++cs->open_count;
32301-
32302- if (cs->open_count == 1) {
32303+ if (local_inc_return(&cs->open_count) == 1) {
32304 spin_lock_irqsave(&cs->lock, flags);
32305 cs->tty = tty;
32306 spin_unlock_irqrestore(&cs->lock, flags);
32307@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
32308
32309 if (!cs->connected)
32310 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32311- else if (!cs->open_count)
32312+ else if (!local_read(&cs->open_count))
32313 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32314 else {
32315- if (!--cs->open_count) {
32316+ if (!local_dec_return(&cs->open_count)) {
32317 spin_lock_irqsave(&cs->lock, flags);
32318 cs->tty = NULL;
32319 spin_unlock_irqrestore(&cs->lock, flags);
32320@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
32321 if (!cs->connected) {
32322 gig_dbg(DEBUG_IF, "not connected");
32323 retval = -ENODEV;
32324- } else if (!cs->open_count)
32325+ } else if (!local_read(&cs->open_count))
32326 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32327 else {
32328 retval = 0;
32329@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
32330 if (!cs->connected) {
32331 gig_dbg(DEBUG_IF, "not connected");
32332 retval = -ENODEV;
32333- } else if (!cs->open_count)
32334+ } else if (!local_read(&cs->open_count))
32335 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32336 else if (cs->mstate != MS_LOCKED) {
32337 dev_warn(cs->dev, "can't write to unlocked device\n");
32338@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
32339 if (!cs->connected) {
32340 gig_dbg(DEBUG_IF, "not connected");
32341 retval = -ENODEV;
32342- } else if (!cs->open_count)
32343+ } else if (!local_read(&cs->open_count))
32344 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32345 else if (cs->mstate != MS_LOCKED) {
32346 dev_warn(cs->dev, "can't write to unlocked device\n");
32347@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
32348
32349 if (!cs->connected)
32350 gig_dbg(DEBUG_IF, "not connected");
32351- else if (!cs->open_count)
32352+ else if (!local_read(&cs->open_count))
32353 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32354 else if (cs->mstate != MS_LOCKED)
32355 dev_warn(cs->dev, "can't write to unlocked device\n");
32356@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
32357
32358 if (!cs->connected)
32359 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32360- else if (!cs->open_count)
32361+ else if (!local_read(&cs->open_count))
32362 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32363 else {
32364 //FIXME
32365@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
32366
32367 if (!cs->connected)
32368 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32369- else if (!cs->open_count)
32370+ else if (!local_read(&cs->open_count))
32371 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32372 else {
32373 //FIXME
32374@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
32375 goto out;
32376 }
32377
32378- if (!cs->open_count) {
32379+ if (!local_read(&cs->open_count)) {
32380 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32381 goto out;
32382 }
32383diff -urNp linux-2.6.32.48/drivers/isdn/hardware/avm/b1.c linux-2.6.32.48/drivers/isdn/hardware/avm/b1.c
32384--- linux-2.6.32.48/drivers/isdn/hardware/avm/b1.c 2011-11-08 19:02:43.000000000 -0500
32385+++ linux-2.6.32.48/drivers/isdn/hardware/avm/b1.c 2011-11-15 19:59:43.000000000 -0500
32386@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
32387 }
32388 if (left) {
32389 if (t4file->user) {
32390- if (copy_from_user(buf, dp, left))
32391+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32392 return -EFAULT;
32393 } else {
32394 memcpy(buf, dp, left);
32395@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
32396 }
32397 if (left) {
32398 if (config->user) {
32399- if (copy_from_user(buf, dp, left))
32400+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32401 return -EFAULT;
32402 } else {
32403 memcpy(buf, dp, left);
32404diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.48/drivers/isdn/hardware/eicon/capidtmf.c
32405--- linux-2.6.32.48/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-08 19:02:43.000000000 -0500
32406+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-15 19:59:43.000000000 -0500
32407@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
32408 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
32409 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
32410
32411+ pax_track_stack();
32412
32413 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
32414 {
32415diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.48/drivers/isdn/hardware/eicon/capifunc.c
32416--- linux-2.6.32.48/drivers/isdn/hardware/eicon/capifunc.c 2011-11-08 19:02:43.000000000 -0500
32417+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/capifunc.c 2011-11-15 19:59:43.000000000 -0500
32418@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
32419 IDI_SYNC_REQ req;
32420 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
32421
32422+ pax_track_stack();
32423+
32424 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
32425
32426 for (x = 0; x < MAX_DESCRIPTORS; x++) {
32427diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.48/drivers/isdn/hardware/eicon/diddfunc.c
32428--- linux-2.6.32.48/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-08 19:02:43.000000000 -0500
32429+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-15 19:59:43.000000000 -0500
32430@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
32431 IDI_SYNC_REQ req;
32432 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
32433
32434+ pax_track_stack();
32435+
32436 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
32437
32438 for (x = 0; x < MAX_DESCRIPTORS; x++) {
32439diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.48/drivers/isdn/hardware/eicon/divasfunc.c
32440--- linux-2.6.32.48/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-08 19:02:43.000000000 -0500
32441+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-15 19:59:43.000000000 -0500
32442@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
32443 IDI_SYNC_REQ req;
32444 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
32445
32446+ pax_track_stack();
32447+
32448 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
32449
32450 for (x = 0; x < MAX_DESCRIPTORS; x++) {
32451diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.48/drivers/isdn/hardware/eicon/divasync.h
32452--- linux-2.6.32.48/drivers/isdn/hardware/eicon/divasync.h 2011-11-08 19:02:43.000000000 -0500
32453+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/divasync.h 2011-11-15 19:59:43.000000000 -0500
32454@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32455 } diva_didd_add_adapter_t;
32456 typedef struct _diva_didd_remove_adapter {
32457 IDI_CALL p_request;
32458-} diva_didd_remove_adapter_t;
32459+} __no_const diva_didd_remove_adapter_t;
32460 typedef struct _diva_didd_read_adapter_array {
32461 void * buffer;
32462 dword length;
32463diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.48/drivers/isdn/hardware/eicon/idifunc.c
32464--- linux-2.6.32.48/drivers/isdn/hardware/eicon/idifunc.c 2011-11-08 19:02:43.000000000 -0500
32465+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/idifunc.c 2011-11-15 19:59:43.000000000 -0500
32466@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
32467 IDI_SYNC_REQ req;
32468 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
32469
32470+ pax_track_stack();
32471+
32472 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
32473
32474 for (x = 0; x < MAX_DESCRIPTORS; x++) {
32475diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/message.c linux-2.6.32.48/drivers/isdn/hardware/eicon/message.c
32476--- linux-2.6.32.48/drivers/isdn/hardware/eicon/message.c 2011-11-08 19:02:43.000000000 -0500
32477+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/message.c 2011-11-15 19:59:43.000000000 -0500
32478@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
32479 dword d;
32480 word w;
32481
32482+ pax_track_stack();
32483+
32484 a = plci->adapter;
32485 Id = ((word)plci->Id<<8)|a->Id;
32486 PUT_WORD(&SS_Ind[4],0x0000);
32487@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
32488 word j, n, w;
32489 dword d;
32490
32491+ pax_track_stack();
32492+
32493
32494 for(i=0;i<8;i++) bp_parms[i].length = 0;
32495 for(i=0;i<2;i++) global_config[i].length = 0;
32496@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
32497 const byte llc3[] = {4,3,2,2,6,6,0};
32498 const byte header[] = {0,2,3,3,0,0,0};
32499
32500+ pax_track_stack();
32501+
32502 for(i=0;i<8;i++) bp_parms[i].length = 0;
32503 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
32504 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
32505@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
32506 word appl_number_group_type[MAX_APPL];
32507 PLCI *auxplci;
32508
32509+ pax_track_stack();
32510+
32511 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
32512
32513 if(!a->group_optimization_enabled)
32514diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.48/drivers/isdn/hardware/eicon/mntfunc.c
32515--- linux-2.6.32.48/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-08 19:02:43.000000000 -0500
32516+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-15 19:59:43.000000000 -0500
32517@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
32518 IDI_SYNC_REQ req;
32519 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
32520
32521+ pax_track_stack();
32522+
32523 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
32524
32525 for (x = 0; x < MAX_DESCRIPTORS; x++) {
32526diff -urNp linux-2.6.32.48/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.48/drivers/isdn/hardware/eicon/xdi_adapter.h
32527--- linux-2.6.32.48/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-08 19:02:43.000000000 -0500
32528+++ linux-2.6.32.48/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-15 19:59:43.000000000 -0500
32529@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32530 typedef struct _diva_os_idi_adapter_interface {
32531 diva_init_card_proc_t cleanup_adapter_proc;
32532 diva_cmd_card_proc_t cmd_proc;
32533-} diva_os_idi_adapter_interface_t;
32534+} __no_const diva_os_idi_adapter_interface_t;
32535
32536 typedef struct _diva_os_xdi_adapter {
32537 struct list_head link;
32538diff -urNp linux-2.6.32.48/drivers/isdn/i4l/isdn_common.c linux-2.6.32.48/drivers/isdn/i4l/isdn_common.c
32539--- linux-2.6.32.48/drivers/isdn/i4l/isdn_common.c 2011-11-08 19:02:43.000000000 -0500
32540+++ linux-2.6.32.48/drivers/isdn/i4l/isdn_common.c 2011-11-15 19:59:43.000000000 -0500
32541@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
32542 } iocpar;
32543 void __user *argp = (void __user *)arg;
32544
32545+ pax_track_stack();
32546+
32547 #define name iocpar.name
32548 #define bname iocpar.bname
32549 #define iocts iocpar.iocts
32550diff -urNp linux-2.6.32.48/drivers/isdn/icn/icn.c linux-2.6.32.48/drivers/isdn/icn/icn.c
32551--- linux-2.6.32.48/drivers/isdn/icn/icn.c 2011-11-08 19:02:43.000000000 -0500
32552+++ linux-2.6.32.48/drivers/isdn/icn/icn.c 2011-11-15 19:59:43.000000000 -0500
32553@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
32554 if (count > len)
32555 count = len;
32556 if (user) {
32557- if (copy_from_user(msg, buf, count))
32558+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32559 return -EFAULT;
32560 } else
32561 memcpy(msg, buf, count);
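The extra `count > sizeof msg` test above bounds a user-controlled length against the on-stack destination before copy_from_user() runs, so an oversized write request can no longer overflow msg. The general shape of the check, with illustrative names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch: validate a user-supplied length against the kernel buffer before
 * copying; the buffer size and error policy are placeholders. */
static int example_write(const char __user *buf, size_t len)
{
	u8 msg[64];

	if (len > sizeof(msg))
		return -EINVAL;
	if (copy_from_user(msg, buf, len))
		return -EFAULT;
	return 0;
}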
32562diff -urNp linux-2.6.32.48/drivers/isdn/mISDN/socket.c linux-2.6.32.48/drivers/isdn/mISDN/socket.c
32563--- linux-2.6.32.48/drivers/isdn/mISDN/socket.c 2011-11-08 19:02:43.000000000 -0500
32564+++ linux-2.6.32.48/drivers/isdn/mISDN/socket.c 2011-11-15 19:59:43.000000000 -0500
32565@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
32566 if (dev) {
32567 struct mISDN_devinfo di;
32568
32569+ memset(&di, 0, sizeof(di));
32570 di.id = dev->id;
32571 di.Dprotocols = dev->Dprotocols;
32572 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
32573@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
32574 if (dev) {
32575 struct mISDN_devinfo di;
32576
32577+ memset(&di, 0, sizeof(di));
32578 di.id = dev->id;
32579 di.Dprotocols = dev->Dprotocols;
32580 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
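Both hunks above zero the struct mISDN_devinfo reply before it is filled and later copied to user space; without the memset, structure padding and any members the driver does not set would leak stale kernel stack bytes. A minimal sketch of the pattern with placeholder types:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_devinfo {
	u32  id;
	u32  protocols;
	char name[16];
};

/* Sketch: clear the whole reply so padding and unset fields never carry
 * uninitialized stack data out through copy_to_user(). */
static long example_fill_reply(void __user *argp)
{
	struct example_devinfo di;

	memset(&di, 0, sizeof(di));
	di.id = 1;
	di.protocols = 0;
	if (copy_to_user(argp, &di, sizeof(di)))
		return -EFAULT;
	return 0;
}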
32581diff -urNp linux-2.6.32.48/drivers/isdn/sc/interrupt.c linux-2.6.32.48/drivers/isdn/sc/interrupt.c
32582--- linux-2.6.32.48/drivers/isdn/sc/interrupt.c 2011-11-08 19:02:43.000000000 -0500
32583+++ linux-2.6.32.48/drivers/isdn/sc/interrupt.c 2011-11-15 19:59:43.000000000 -0500
32584@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
32585 }
32586 else if(callid>=0x0000 && callid<=0x7FFF)
32587 {
32588+ int len;
32589+
32590 pr_debug("%s: Got Incoming Call\n",
32591 sc_adapter[card]->devicename);
32592- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
32593- strcpy(setup.eazmsn,
32594- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
32595+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
32596+ sizeof(setup.phone));
32597+ if (len >= sizeof(setup.phone))
32598+ continue;
32599+ len = strlcpy(setup.eazmsn,
32600+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
32601+ sizeof(setup.eazmsn));
32602+ if (len >= sizeof(setup.eazmsn))
32603+ continue;
32604 setup.si1 = 7;
32605 setup.si2 = 0;
32606 setup.plan = 0;
32607@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
32608 * Handle a GetMyNumber Rsp
32609 */
32610 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
32611- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
32612+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
32613+ rcvmsg.msg_data.byte_array,
32614+ sizeof(rcvmsg.msg_data.byte_array));
32615 continue;
32616 }
32617
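The interrupt handler above trades strcpy() for strlcpy() and checks the return value: strlcpy() reports the length of the source string, so a result greater than or equal to the destination size means the incoming number was too long, and the message is skipped instead of overflowing setup.phone or setup.eazmsn. The check in isolation, with hypothetical names:

#include <linux/errno.h>
#include <linux/string.h>

/* Sketch: copy a string with truncation detection. */
static int copy_number(char *dst, size_t dstsz, const char *src)
{
	size_t len = strlcpy(dst, src, dstsz);

	if (len >= dstsz)
		return -EOVERFLOW;	/* source did not fit, reject it */
	return 0;
}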
32618diff -urNp linux-2.6.32.48/drivers/lguest/core.c linux-2.6.32.48/drivers/lguest/core.c
32619--- linux-2.6.32.48/drivers/lguest/core.c 2011-11-08 19:02:43.000000000 -0500
32620+++ linux-2.6.32.48/drivers/lguest/core.c 2011-11-15 19:59:43.000000000 -0500
32621@@ -91,9 +91,17 @@ static __init int map_switcher(void)
32622 * it's worked so far. The end address needs +1 because __get_vm_area
32623 * allocates an extra guard page, so we need space for that.
32624 */
32625+
32626+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32627+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32628+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32629+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32630+#else
32631 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32632 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32633 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32634+#endif
32635+
32636 if (!switcher_vma) {
32637 err = -ENOMEM;
32638 printk("lguest: could not map switcher pages high\n");
32639@@ -118,7 +126,7 @@ static __init int map_switcher(void)
32640 * Now the Switcher is mapped at the right address, we can't fail!
32641 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
32642 */
32643- memcpy(switcher_vma->addr, start_switcher_text,
32644+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32645 end_switcher_text - start_switcher_text);
32646
32647 printk(KERN_INFO "lguest: mapped switcher at %p\n",
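Two related changes above: with CONFIG_PAX_KERNEXEC on 32-bit x86 the switcher mapping is requested with the patch-provided VM_KERNEXEC flag so the pages may be executed, and the copy of the switcher text goes through ktla_ktva(), the PaX macro that translates between the relocated kernel text mapping and its linear-map alias. One way the duplicated __get_vm_area() call could be folded, shown only as a sketch:

#include <linux/vmalloc.h>

/* Sketch: same allocation as above with the flag computed once.
 * VM_KERNEXEC exists only with the PaX patch applied. */
static struct vm_struct *map_switcher_area(void)
{
	unsigned long flags = VM_ALLOC;

#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
	flags |= VM_KERNEXEC;
#endif
	return __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, flags,
			     SWITCHER_ADDR,
			     SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES + 1) * PAGE_SIZE);
}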
32648diff -urNp linux-2.6.32.48/drivers/lguest/x86/core.c linux-2.6.32.48/drivers/lguest/x86/core.c
32649--- linux-2.6.32.48/drivers/lguest/x86/core.c 2011-11-08 19:02:43.000000000 -0500
32650+++ linux-2.6.32.48/drivers/lguest/x86/core.c 2011-11-15 19:59:43.000000000 -0500
32651@@ -59,7 +59,7 @@ static struct {
32652 /* Offset from where switcher.S was compiled to where we've copied it */
32653 static unsigned long switcher_offset(void)
32654 {
32655- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32656+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32657 }
32658
32659 /* This cpu's struct lguest_pages. */
32660@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
32661 * These copies are pretty cheap, so we do them unconditionally: */
32662 /* Save the current Host top-level page directory.
32663 */
32664+
32665+#ifdef CONFIG_PAX_PER_CPU_PGD
32666+ pages->state.host_cr3 = read_cr3();
32667+#else
32668 pages->state.host_cr3 = __pa(current->mm->pgd);
32669+#endif
32670+
32671 /*
32672 * Set up the Guest's page tables to see this CPU's pages (and no
32673 * other CPU's pages).
32674@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
32675 * compiled-in switcher code and the high-mapped copy we just made.
32676 */
32677 for (i = 0; i < IDT_ENTRIES; i++)
32678- default_idt_entries[i] += switcher_offset();
32679+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32680
32681 /*
32682 * Set up the Switcher's per-cpu areas.
32683@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
32684 * it will be undisturbed when we switch. To change %cs and jump we
32685 * need this structure to feed to Intel's "lcall" instruction.
32686 */
32687- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32688+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32689 lguest_entry.segment = LGUEST_CS;
32690
32691 /*
32692diff -urNp linux-2.6.32.48/drivers/lguest/x86/switcher_32.S linux-2.6.32.48/drivers/lguest/x86/switcher_32.S
32693--- linux-2.6.32.48/drivers/lguest/x86/switcher_32.S 2011-11-08 19:02:43.000000000 -0500
32694+++ linux-2.6.32.48/drivers/lguest/x86/switcher_32.S 2011-11-15 19:59:43.000000000 -0500
32695@@ -87,6 +87,7 @@
32696 #include <asm/page.h>
32697 #include <asm/segment.h>
32698 #include <asm/lguest.h>
32699+#include <asm/processor-flags.h>
32700
32701 // We mark the start of the code to copy
32702 // It's placed in .text tho it's never run here
32703@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32704 // Changes type when we load it: damn Intel!
32705 // For after we switch over our page tables
32706 // That entry will be read-only: we'd crash.
32707+
32708+#ifdef CONFIG_PAX_KERNEXEC
32709+ mov %cr0, %edx
32710+ xor $X86_CR0_WP, %edx
32711+ mov %edx, %cr0
32712+#endif
32713+
32714 movl $(GDT_ENTRY_TSS*8), %edx
32715 ltr %dx
32716
32717@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32718 // Let's clear it again for our return.
32719 // The GDT descriptor of the Host
32720 // Points to the table after two "size" bytes
32721- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32722+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32723 // Clear "used" from type field (byte 5, bit 2)
32724- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32725+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32726+
32727+#ifdef CONFIG_PAX_KERNEXEC
32728+ mov %cr0, %eax
32729+ xor $X86_CR0_WP, %eax
32730+ mov %eax, %cr0
32731+#endif
32732
32733 // Once our page table's switched, the Guest is live!
32734 // The Host fades as we run this final step.
32735@@ -295,13 +309,12 @@ deliver_to_host:
32736 // I consulted gcc, and it gave
32737 // These instructions, which I gladly credit:
32738 leal (%edx,%ebx,8), %eax
32739- movzwl (%eax),%edx
32740- movl 4(%eax), %eax
32741- xorw %ax, %ax
32742- orl %eax, %edx
32743+ movl 4(%eax), %edx
32744+ movw (%eax), %dx
32745 // Now the address of the handler's in %edx
32746 // We call it now: its "iret" drops us home.
32747- jmp *%edx
32748+ ljmp $__KERNEL_CS, $1f
32749+1: jmp *%edx
32750
32751 // Every interrupt can come to us here
32752 // But we must truly tell each apart.
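In the switcher hunks above, KERNEXEC makes the host GDT read-only, so the byte write that clears the TSS descriptor's busy bit is bracketed by flipping CR0.WP off and back on; the final dispatch also reloads __KERNEL_CS via ljmp before the indirect jump. A C-level sketch of the write-protect toggle the assembly performs (interrupts are assumed disabled around the write):

#include <asm/processor-flags.h>

/* Sketch: flip CR0.WP so a single supervisor write to read-only data can
 * proceed, then flip it back.  Call once before and once after the write. */
static inline void cr0_wp_toggle(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	cr0 ^= X86_CR0_WP;
	asm volatile("mov %0, %%cr0" : : "r" (cr0));
}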
32753diff -urNp linux-2.6.32.48/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.48/drivers/macintosh/via-pmu-backlight.c
32754--- linux-2.6.32.48/drivers/macintosh/via-pmu-backlight.c 2011-11-08 19:02:43.000000000 -0500
32755+++ linux-2.6.32.48/drivers/macintosh/via-pmu-backlight.c 2011-11-15 19:59:43.000000000 -0500
32756@@ -15,7 +15,7 @@
32757
32758 #define MAX_PMU_LEVEL 0xFF
32759
32760-static struct backlight_ops pmu_backlight_data;
32761+static const struct backlight_ops pmu_backlight_data;
32762 static DEFINE_SPINLOCK(pmu_backlight_lock);
32763 static int sleeping, uses_pmu_bl;
32764 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
32765@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
32766 return bd->props.brightness;
32767 }
32768
32769-static struct backlight_ops pmu_backlight_data = {
32770+static const struct backlight_ops pmu_backlight_data = {
32771 .get_brightness = pmu_backlight_get_brightness,
32772 .update_status = pmu_backlight_update_status,
32773
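pmu_backlight_data is only ever read after initialization, so declaring it const lets it live in .rodata and removes a writable function-pointer table from the kernel; the same treatment is applied to many ops structures throughout this patch. A small sketch of a const ops table (handler bodies are placeholders):

#include <linux/backlight.h>

static int ex_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static int ex_update_status(struct backlight_device *bd)
{
	return 0;
}

/* Sketch: const ops tables end up in read-only data. */
static const struct backlight_ops example_backlight_ops = {
	.get_brightness	= ex_get_brightness,
	.update_status	= ex_update_status,
};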
32774diff -urNp linux-2.6.32.48/drivers/macintosh/via-pmu.c linux-2.6.32.48/drivers/macintosh/via-pmu.c
32775--- linux-2.6.32.48/drivers/macintosh/via-pmu.c 2011-11-08 19:02:43.000000000 -0500
32776+++ linux-2.6.32.48/drivers/macintosh/via-pmu.c 2011-11-15 19:59:43.000000000 -0500
32777@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
32778 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
32779 }
32780
32781-static struct platform_suspend_ops pmu_pm_ops = {
32782+static const struct platform_suspend_ops pmu_pm_ops = {
32783 .enter = powerbook_sleep,
32784 .valid = pmu_sleep_valid,
32785 };
32786diff -urNp linux-2.6.32.48/drivers/md/dm.c linux-2.6.32.48/drivers/md/dm.c
32787--- linux-2.6.32.48/drivers/md/dm.c 2011-11-08 19:02:43.000000000 -0500
32788+++ linux-2.6.32.48/drivers/md/dm.c 2011-11-15 19:59:43.000000000 -0500
32789@@ -165,9 +165,9 @@ struct mapped_device {
32790 /*
32791 * Event handling.
32792 */
32793- atomic_t event_nr;
32794+ atomic_unchecked_t event_nr;
32795 wait_queue_head_t eventq;
32796- atomic_t uevent_seq;
32797+ atomic_unchecked_t uevent_seq;
32798 struct list_head uevent_list;
32799 spinlock_t uevent_lock; /* Protect access to uevent_list */
32800
32801@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
32802 rwlock_init(&md->map_lock);
32803 atomic_set(&md->holders, 1);
32804 atomic_set(&md->open_count, 0);
32805- atomic_set(&md->event_nr, 0);
32806- atomic_set(&md->uevent_seq, 0);
32807+ atomic_set_unchecked(&md->event_nr, 0);
32808+ atomic_set_unchecked(&md->uevent_seq, 0);
32809 INIT_LIST_HEAD(&md->uevent_list);
32810 spin_lock_init(&md->uevent_lock);
32811
32812@@ -1927,7 +1927,7 @@ static void event_callback(void *context
32813
32814 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32815
32816- atomic_inc(&md->event_nr);
32817+ atomic_inc_unchecked(&md->event_nr);
32818 wake_up(&md->eventq);
32819 }
32820
32821@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
32822
32823 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32824 {
32825- return atomic_add_return(1, &md->uevent_seq);
32826+ return atomic_add_return_unchecked(1, &md->uevent_seq);
32827 }
32828
32829 uint32_t dm_get_event_nr(struct mapped_device *md)
32830 {
32831- return atomic_read(&md->event_nr);
32832+ return atomic_read_unchecked(&md->event_nr);
32833 }
32834
32835 int dm_wait_event(struct mapped_device *md, int event_nr)
32836 {
32837 return wait_event_interruptible(md->eventq,
32838- (event_nr != atomic_read(&md->event_nr)));
32839+ (event_nr != atomic_read_unchecked(&md->event_nr)));
32840 }
32841
32842 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
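The event_nr/uevent_seq conversions above follow the PaX REFCOUNT scheme: ordinary atomic_t operations are made to trap on overflow so reference counts cannot wrap, while counters for which wrapping is harmless (event and sequence numbers, statistics) are switched to atomic_unchecked_t with the matching *_unchecked accessors. A sketch of the split, assuming the patch's types:

#include <asm/atomic.h>

/* Sketch: keep the checked type for refcounts, use the unchecked variant
 * (provided by the PaX patch) for counters that may legitimately wrap. */
struct example_dev {
	atomic_t		holders;	/* refcount: overflow must trap   */
	atomic_unchecked_t	event_nr;	/* sequence: wrapping is harmless */
};

static void example_event(struct example_dev *d)
{
	atomic_inc_unchecked(&d->event_nr);
}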
32843diff -urNp linux-2.6.32.48/drivers/md/dm-ioctl.c linux-2.6.32.48/drivers/md/dm-ioctl.c
32844--- linux-2.6.32.48/drivers/md/dm-ioctl.c 2011-11-08 19:02:43.000000000 -0500
32845+++ linux-2.6.32.48/drivers/md/dm-ioctl.c 2011-11-15 19:59:43.000000000 -0500
32846@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
32847 cmd == DM_LIST_VERSIONS_CMD)
32848 return 0;
32849
32850- if ((cmd == DM_DEV_CREATE_CMD)) {
32851+ if (cmd == DM_DEV_CREATE_CMD) {
32852 if (!*param->name) {
32853 DMWARN("name not supplied when creating device");
32854 return -EINVAL;
32855diff -urNp linux-2.6.32.48/drivers/md/dm-raid1.c linux-2.6.32.48/drivers/md/dm-raid1.c
32856--- linux-2.6.32.48/drivers/md/dm-raid1.c 2011-11-08 19:02:43.000000000 -0500
32857+++ linux-2.6.32.48/drivers/md/dm-raid1.c 2011-11-15 19:59:43.000000000 -0500
32858@@ -41,7 +41,7 @@ enum dm_raid1_error {
32859
32860 struct mirror {
32861 struct mirror_set *ms;
32862- atomic_t error_count;
32863+ atomic_unchecked_t error_count;
32864 unsigned long error_type;
32865 struct dm_dev *dev;
32866 sector_t offset;
32867@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
32868 * simple way to tell if a device has encountered
32869 * errors.
32870 */
32871- atomic_inc(&m->error_count);
32872+ atomic_inc_unchecked(&m->error_count);
32873
32874 if (test_and_set_bit(error_type, &m->error_type))
32875 return;
32876@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
32877 }
32878
32879 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
32880- if (!atomic_read(&new->error_count)) {
32881+ if (!atomic_read_unchecked(&new->error_count)) {
32882 set_default_mirror(new);
32883 break;
32884 }
32885@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
32886 struct mirror *m = get_default_mirror(ms);
32887
32888 do {
32889- if (likely(!atomic_read(&m->error_count)))
32890+ if (likely(!atomic_read_unchecked(&m->error_count)))
32891 return m;
32892
32893 if (m-- == ms->mirror)
32894@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
32895 {
32896 struct mirror *default_mirror = get_default_mirror(m->ms);
32897
32898- return !atomic_read(&default_mirror->error_count);
32899+ return !atomic_read_unchecked(&default_mirror->error_count);
32900 }
32901
32902 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32903@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
32904 */
32905 if (likely(region_in_sync(ms, region, 1)))
32906 m = choose_mirror(ms, bio->bi_sector);
32907- else if (m && atomic_read(&m->error_count))
32908+ else if (m && atomic_read_unchecked(&m->error_count))
32909 m = NULL;
32910
32911 if (likely(m))
32912@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
32913 }
32914
32915 ms->mirror[mirror].ms = ms;
32916- atomic_set(&(ms->mirror[mirror].error_count), 0);
32917+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32918 ms->mirror[mirror].error_type = 0;
32919 ms->mirror[mirror].offset = offset;
32920
32921@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
32922 */
32923 static char device_status_char(struct mirror *m)
32924 {
32925- if (!atomic_read(&(m->error_count)))
32926+ if (!atomic_read_unchecked(&(m->error_count)))
32927 return 'A';
32928
32929 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
32930diff -urNp linux-2.6.32.48/drivers/md/dm-stripe.c linux-2.6.32.48/drivers/md/dm-stripe.c
32931--- linux-2.6.32.48/drivers/md/dm-stripe.c 2011-11-08 19:02:43.000000000 -0500
32932+++ linux-2.6.32.48/drivers/md/dm-stripe.c 2011-11-15 19:59:43.000000000 -0500
32933@@ -20,7 +20,7 @@ struct stripe {
32934 struct dm_dev *dev;
32935 sector_t physical_start;
32936
32937- atomic_t error_count;
32938+ atomic_unchecked_t error_count;
32939 };
32940
32941 struct stripe_c {
32942@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
32943 kfree(sc);
32944 return r;
32945 }
32946- atomic_set(&(sc->stripe[i].error_count), 0);
32947+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32948 }
32949
32950 ti->private = sc;
32951@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
32952 DMEMIT("%d ", sc->stripes);
32953 for (i = 0; i < sc->stripes; i++) {
32954 DMEMIT("%s ", sc->stripe[i].dev->name);
32955- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32956+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32957 'D' : 'A';
32958 }
32959 buffer[i] = '\0';
32960@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
32961 */
32962 for (i = 0; i < sc->stripes; i++)
32963 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32964- atomic_inc(&(sc->stripe[i].error_count));
32965- if (atomic_read(&(sc->stripe[i].error_count)) <
32966+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
32967+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32968 DM_IO_ERROR_THRESHOLD)
32969 queue_work(kstriped, &sc->kstriped_ws);
32970 }
32971diff -urNp linux-2.6.32.48/drivers/md/dm-sysfs.c linux-2.6.32.48/drivers/md/dm-sysfs.c
32972--- linux-2.6.32.48/drivers/md/dm-sysfs.c 2011-11-08 19:02:43.000000000 -0500
32973+++ linux-2.6.32.48/drivers/md/dm-sysfs.c 2011-11-15 19:59:43.000000000 -0500
32974@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
32975 NULL,
32976 };
32977
32978-static struct sysfs_ops dm_sysfs_ops = {
32979+static const struct sysfs_ops dm_sysfs_ops = {
32980 .show = dm_attr_show,
32981 };
32982
32983diff -urNp linux-2.6.32.48/drivers/md/dm-table.c linux-2.6.32.48/drivers/md/dm-table.c
32984--- linux-2.6.32.48/drivers/md/dm-table.c 2011-11-08 19:02:43.000000000 -0500
32985+++ linux-2.6.32.48/drivers/md/dm-table.c 2011-11-15 19:59:43.000000000 -0500
32986@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32987 if (!dev_size)
32988 return 0;
32989
32990- if ((start >= dev_size) || (start + len > dev_size)) {
32991+ if ((start >= dev_size) || (len > dev_size - start)) {
32992 DMWARN("%s: %s too small for target: "
32993 "start=%llu, len=%llu, dev_size=%llu",
32994 dm_device_name(ti->table->md), bdevname(bdev, b),
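The rewritten test above avoids an integer overflow: for large values `start + len` can wrap around and compare as small, slipping past the old check, while `len > dev_size - start` cannot wrap because the first clause already guarantees start < dev_size. The same idea in plain C:

#include <stdbool.h>
#include <stdint.h>

/* Overflow-safe range check: returns true when [start, start + len) does
 * not fit inside [0, size). */
static bool range_invalid(uint64_t start, uint64_t len, uint64_t size)
{
	return start >= size || len > size - start;
}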
32995diff -urNp linux-2.6.32.48/drivers/md/md.c linux-2.6.32.48/drivers/md/md.c
32996--- linux-2.6.32.48/drivers/md/md.c 2011-11-08 19:02:43.000000000 -0500
32997+++ linux-2.6.32.48/drivers/md/md.c 2011-11-15 19:59:43.000000000 -0500
32998@@ -153,10 +153,10 @@ static int start_readonly;
32999 * start build, activate spare
33000 */
33001 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33002-static atomic_t md_event_count;
33003+static atomic_unchecked_t md_event_count;
33004 void md_new_event(mddev_t *mddev)
33005 {
33006- atomic_inc(&md_event_count);
33007+ atomic_inc_unchecked(&md_event_count);
33008 wake_up(&md_event_waiters);
33009 }
33010 EXPORT_SYMBOL_GPL(md_new_event);
33011@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33012 */
33013 static void md_new_event_inintr(mddev_t *mddev)
33014 {
33015- atomic_inc(&md_event_count);
33016+ atomic_inc_unchecked(&md_event_count);
33017 wake_up(&md_event_waiters);
33018 }
33019
33020@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev
33021
33022 rdev->preferred_minor = 0xffff;
33023 rdev->data_offset = le64_to_cpu(sb->data_offset);
33024- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33025+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33026
33027 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33028 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33029@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev,
33030 else
33031 sb->resync_offset = cpu_to_le64(0);
33032
33033- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33034+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33035
33036 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33037 sb->size = cpu_to_le64(mddev->dev_sectors);
33038@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
33039 static ssize_t
33040 errors_show(mdk_rdev_t *rdev, char *page)
33041 {
33042- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33043+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33044 }
33045
33046 static ssize_t
33047@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const cha
33048 char *e;
33049 unsigned long n = simple_strtoul(buf, &e, 10);
33050 if (*buf && (*e == 0 || *e == '\n')) {
33051- atomic_set(&rdev->corrected_errors, n);
33052+ atomic_set_unchecked(&rdev->corrected_errors, n);
33053 return len;
33054 }
33055 return -EINVAL;
33056@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko
33057 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
33058 kfree(rdev);
33059 }
33060-static struct sysfs_ops rdev_sysfs_ops = {
33061+static const struct sysfs_ops rdev_sysfs_ops = {
33062 .show = rdev_attr_show,
33063 .store = rdev_attr_store,
33064 };
33065@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_
33066 rdev->data_offset = 0;
33067 rdev->sb_events = 0;
33068 atomic_set(&rdev->nr_pending, 0);
33069- atomic_set(&rdev->read_errors, 0);
33070- atomic_set(&rdev->corrected_errors, 0);
33071+ atomic_set_unchecked(&rdev->read_errors, 0);
33072+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33073
33074 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
33075 if (!size) {
33076@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
33077 kfree(mddev);
33078 }
33079
33080-static struct sysfs_ops md_sysfs_ops = {
33081+static const struct sysfs_ops md_sysfs_ops = {
33082 .show = md_attr_show,
33083 .store = md_attr_store,
33084 };
33085@@ -4482,7 +4482,8 @@ out:
33086 err = 0;
33087 blk_integrity_unregister(disk);
33088 md_new_event(mddev);
33089- sysfs_notify_dirent(mddev->sysfs_state);
33090+ if (mddev->sysfs_state)
33091+ sysfs_notify_dirent(mddev->sysfs_state);
33092 return err;
33093 }
33094
33095@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *
33096
33097 spin_unlock(&pers_lock);
33098 seq_printf(seq, "\n");
33099- mi->event = atomic_read(&md_event_count);
33100+ mi->event = atomic_read_unchecked(&md_event_count);
33101 return 0;
33102 }
33103 if (v == (void*)2) {
33104@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *
33105 chunk_kb ? "KB" : "B");
33106 if (bitmap->file) {
33107 seq_printf(seq, ", file: ");
33108- seq_path(seq, &bitmap->file->f_path, " \t\n");
33109+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33110 }
33111
33112 seq_printf(seq, "\n");
33113@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *ino
33114 else {
33115 struct seq_file *p = file->private_data;
33116 p->private = mi;
33117- mi->event = atomic_read(&md_event_count);
33118+ mi->event = atomic_read_unchecked(&md_event_count);
33119 }
33120 return error;
33121 }
33122@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct f
33123 /* always allow read */
33124 mask = POLLIN | POLLRDNORM;
33125
33126- if (mi->event != atomic_read(&md_event_count))
33127+ if (mi->event != atomic_read_unchecked(&md_event_count))
33128 mask |= POLLERR | POLLPRI;
33129 return mask;
33130 }
33131@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev,
33132 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33133 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33134 (int)part_stat_read(&disk->part0, sectors[1]) -
33135- atomic_read(&disk->sync_io);
33136+ atomic_read_unchecked(&disk->sync_io);
33137 /* sync IO will cause sync_io to increase before the disk_stats
33138 * as sync_io is counted when a request starts, and
33139 * disk_stats is counted when it completes.
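Besides the counter conversions, two small fixes sit in the md.c diff above: sysfs_notify_dirent() is only called when mddev->sysfs_state exists, and seq_path() gets the backslash added to its escape set so a bitmap file name containing '\' cannot render ambiguously in /proc/mdstat. seq_path() octal-escapes every character listed in its last argument; a sketch of the call in context:

#include <linux/fs.h>
#include <linux/seq_file.h>

/* Sketch: escape whitespace and the backslash itself so the emitted path
 * always parses back unambiguously. */
static void show_backing_file(struct seq_file *seq, struct file *file)
{
	seq_printf(seq, ", file: ");
	seq_path(seq, &file->f_path, " \t\n\\");
	seq_putc(seq, '\n');
}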
33140diff -urNp linux-2.6.32.48/drivers/md/md.h linux-2.6.32.48/drivers/md/md.h
33141--- linux-2.6.32.48/drivers/md/md.h 2011-11-08 19:02:43.000000000 -0500
33142+++ linux-2.6.32.48/drivers/md/md.h 2011-11-15 19:59:43.000000000 -0500
33143@@ -94,10 +94,10 @@ struct mdk_rdev_s
33144 * only maintained for arrays that
33145 * support hot removal
33146 */
33147- atomic_t read_errors; /* number of consecutive read errors that
33148+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33149 * we have tried to ignore.
33150 */
33151- atomic_t corrected_errors; /* number of corrected read errors,
33152+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33153 * for reporting to userspace and storing
33154 * in superblock.
33155 */
33156@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
33157
33158 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33159 {
33160- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33161+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33162 }
33163
33164 struct mdk_personality
33165diff -urNp linux-2.6.32.48/drivers/md/raid10.c linux-2.6.32.48/drivers/md/raid10.c
33166--- linux-2.6.32.48/drivers/md/raid10.c 2011-11-08 19:02:43.000000000 -0500
33167+++ linux-2.6.32.48/drivers/md/raid10.c 2011-11-15 19:59:43.000000000 -0500
33168@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
33169 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
33170 set_bit(R10BIO_Uptodate, &r10_bio->state);
33171 else {
33172- atomic_add(r10_bio->sectors,
33173+ atomic_add_unchecked(r10_bio->sectors,
33174 &conf->mirrors[d].rdev->corrected_errors);
33175 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
33176 md_error(r10_bio->mddev,
33177@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
33178 test_bit(In_sync, &rdev->flags)) {
33179 atomic_inc(&rdev->nr_pending);
33180 rcu_read_unlock();
33181- atomic_add(s, &rdev->corrected_errors);
33182+ atomic_add_unchecked(s, &rdev->corrected_errors);
33183 if (sync_page_io(rdev->bdev,
33184 r10_bio->devs[sl].addr +
33185 sect + rdev->data_offset,
33186diff -urNp linux-2.6.32.48/drivers/md/raid1.c linux-2.6.32.48/drivers/md/raid1.c
33187--- linux-2.6.32.48/drivers/md/raid1.c 2011-11-08 19:02:43.000000000 -0500
33188+++ linux-2.6.32.48/drivers/md/raid1.c 2011-11-15 19:59:43.000000000 -0500
33189@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
33190 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
33191 continue;
33192 rdev = conf->mirrors[d].rdev;
33193- atomic_add(s, &rdev->corrected_errors);
33194+ atomic_add_unchecked(s, &rdev->corrected_errors);
33195 if (sync_page_io(rdev->bdev,
33196 sect + rdev->data_offset,
33197 s<<9,
33198@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
33199 /* Well, this device is dead */
33200 md_error(mddev, rdev);
33201 else {
33202- atomic_add(s, &rdev->corrected_errors);
33203+ atomic_add_unchecked(s, &rdev->corrected_errors);
33204 printk(KERN_INFO
33205 "raid1:%s: read error corrected "
33206 "(%d sectors at %llu on %s)\n",
33207diff -urNp linux-2.6.32.48/drivers/md/raid5.c linux-2.6.32.48/drivers/md/raid5.c
33208--- linux-2.6.32.48/drivers/md/raid5.c 2011-11-08 19:02:43.000000000 -0500
33209+++ linux-2.6.32.48/drivers/md/raid5.c 2011-11-15 19:59:43.000000000 -0500
33210@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
33211 bi->bi_next = NULL;
33212 if ((rw & WRITE) &&
33213 test_bit(R5_ReWrite, &sh->dev[i].flags))
33214- atomic_add(STRIPE_SECTORS,
33215+ atomic_add_unchecked(STRIPE_SECTORS,
33216 &rdev->corrected_errors);
33217 generic_make_request(bi);
33218 } else {
33219@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
33220 clear_bit(R5_ReadError, &sh->dev[i].flags);
33221 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33222 }
33223- if (atomic_read(&conf->disks[i].rdev->read_errors))
33224- atomic_set(&conf->disks[i].rdev->read_errors, 0);
33225+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
33226+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
33227 } else {
33228 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
33229 int retry = 0;
33230 rdev = conf->disks[i].rdev;
33231
33232 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33233- atomic_inc(&rdev->read_errors);
33234+ atomic_inc_unchecked(&rdev->read_errors);
33235 if (conf->mddev->degraded >= conf->max_degraded)
33236 printk_rl(KERN_WARNING
33237 "raid5:%s: read error not correctable "
33238@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
33239 (unsigned long long)(sh->sector
33240 + rdev->data_offset),
33241 bdn);
33242- else if (atomic_read(&rdev->read_errors)
33243+ else if (atomic_read_unchecked(&rdev->read_errors)
33244 > conf->max_nr_stripes)
33245 printk(KERN_WARNING
33246 "raid5:%s: Too many read errors, failing device %s.\n",
33247@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
33248 sector_t r_sector;
33249 struct stripe_head sh2;
33250
33251+ pax_track_stack();
33252
33253 chunk_offset = sector_div(new_sector, sectors_per_chunk);
33254 stripe = new_sector;
33255diff -urNp linux-2.6.32.48/drivers/media/common/saa7146_hlp.c linux-2.6.32.48/drivers/media/common/saa7146_hlp.c
33256--- linux-2.6.32.48/drivers/media/common/saa7146_hlp.c 2011-11-08 19:02:43.000000000 -0500
33257+++ linux-2.6.32.48/drivers/media/common/saa7146_hlp.c 2011-11-15 19:59:43.000000000 -0500
33258@@ -353,6 +353,8 @@ static void calculate_clipping_registers
33259
33260 int x[32], y[32], w[32], h[32];
33261
33262+ pax_track_stack();
33263+
33264 /* clear out memory */
33265 memset(&line_list[0], 0x00, sizeof(u32)*32);
33266 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
33267diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
33268--- linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-08 19:02:43.000000000 -0500
33269+++ linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-15 19:59:43.000000000 -0500
33270@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
33271 u8 buf[HOST_LINK_BUF_SIZE];
33272 int i;
33273
33274+ pax_track_stack();
33275+
33276 dprintk("%s\n", __func__);
33277
33278 /* check if we have space for a link buf in the rx_buffer */
33279@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
33280 unsigned long timeout;
33281 int written;
33282
33283+ pax_track_stack();
33284+
33285 dprintk("%s\n", __func__);
33286
33287 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
33288diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_demux.h
33289--- linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-08 19:02:43.000000000 -0500
33290+++ linux-2.6.32.48/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-15 19:59:43.000000000 -0500
33291@@ -71,7 +71,7 @@ struct dvb_demux_feed {
33292 union {
33293 dmx_ts_cb ts;
33294 dmx_section_cb sec;
33295- } cb;
33296+ } __no_const cb;
33297
33298 struct dvb_demux *demux;
33299 void *priv;
33300diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.48/drivers/media/dvb/dvb-core/dvbdev.c
33301--- linux-2.6.32.48/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-08 19:02:43.000000000 -0500
33302+++ linux-2.6.32.48/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-15 19:59:43.000000000 -0500
33303@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
33304 const struct dvb_device *template, void *priv, int type)
33305 {
33306 struct dvb_device *dvbdev;
33307- struct file_operations *dvbdevfops;
33308+ file_operations_no_const *dvbdevfops;
33309 struct device *clsdev;
33310 int minor;
33311 int id;
33312diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.48/drivers/media/dvb/dvb-usb/cxusb.c
33313--- linux-2.6.32.48/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-08 19:02:43.000000000 -0500
33314+++ linux-2.6.32.48/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-15 19:59:43.000000000 -0500
33315@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
33316 struct dib0700_adapter_state {
33317 int (*set_param_save) (struct dvb_frontend *,
33318 struct dvb_frontend_parameters *);
33319-};
33320+} __no_const;
33321
33322 static int dib7070_set_param_override(struct dvb_frontend *fe,
33323 struct dvb_frontend_parameters *fep)
33324diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_core.c
33325--- linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-08 19:02:43.000000000 -0500
33326+++ linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-15 19:59:43.000000000 -0500
33327@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
33328
33329 u8 buf[260];
33330
33331+ pax_track_stack();
33332+
33333 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
33334 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
33335
33336diff -urNp linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_devices.c
33337--- linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-11-08 19:02:43.000000000 -0500
33338+++ linux-2.6.32.48/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-11-15 19:59:43.000000000 -0500
33339@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
33340
33341 struct dib0700_adapter_state {
33342 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
33343-};
33344+} __no_const;
33345
33346 /* Hauppauge Nova-T 500 (aka Bristol)
33347 * has a LNA on GPIO0 which is enabled by setting 1 */
33348diff -urNp linux-2.6.32.48/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.48/drivers/media/dvb/frontends/dib3000.h
33349--- linux-2.6.32.48/drivers/media/dvb/frontends/dib3000.h 2011-11-08 19:02:43.000000000 -0500
33350+++ linux-2.6.32.48/drivers/media/dvb/frontends/dib3000.h 2011-11-15 19:59:43.000000000 -0500
33351@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33352 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33353 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33354 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33355-};
33356+} __no_const;
33357
33358 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33359 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33360diff -urNp linux-2.6.32.48/drivers/media/dvb/frontends/or51211.c linux-2.6.32.48/drivers/media/dvb/frontends/or51211.c
33361--- linux-2.6.32.48/drivers/media/dvb/frontends/or51211.c 2011-11-08 19:02:43.000000000 -0500
33362+++ linux-2.6.32.48/drivers/media/dvb/frontends/or51211.c 2011-11-15 19:59:43.000000000 -0500
33363@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
33364 u8 tudata[585];
33365 int i;
33366
33367+ pax_track_stack();
33368+
33369 dprintk("Firmware is %zd bytes\n",fw->size);
33370
33371 /* Get eprom data */
33372diff -urNp linux-2.6.32.48/drivers/media/radio/radio-cadet.c linux-2.6.32.48/drivers/media/radio/radio-cadet.c
33373--- linux-2.6.32.48/drivers/media/radio/radio-cadet.c 2011-11-08 19:02:43.000000000 -0500
33374+++ linux-2.6.32.48/drivers/media/radio/radio-cadet.c 2011-11-15 19:59:43.000000000 -0500
33375@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
33376 while (i < count && dev->rdsin != dev->rdsout)
33377 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33378
33379- if (copy_to_user(data, readbuf, i))
33380+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
33381 return -EFAULT;
33382 return i;
33383 }
33384diff -urNp linux-2.6.32.48/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.48/drivers/media/video/cx18/cx18-driver.c
33385--- linux-2.6.32.48/drivers/media/video/cx18/cx18-driver.c 2011-11-08 19:02:43.000000000 -0500
33386+++ linux-2.6.32.48/drivers/media/video/cx18/cx18-driver.c 2011-11-15 19:59:43.000000000 -0500
33387@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
33388
33389 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
33390
33391-static atomic_t cx18_instance = ATOMIC_INIT(0);
33392+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
33393
33394 /* Parameter declarations */
33395 static int cardtype[CX18_MAX_CARDS];
33396@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
33397 struct i2c_client c;
33398 u8 eedata[256];
33399
33400+ pax_track_stack();
33401+
33402 memset(&c, 0, sizeof(c));
33403 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
33404 c.adapter = &cx->i2c_adap[0];
33405@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
33406 struct cx18 *cx;
33407
33408 /* FIXME - module parameter arrays constrain max instances */
33409- i = atomic_inc_return(&cx18_instance) - 1;
33410+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
33411 if (i >= CX18_MAX_CARDS) {
33412 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
33413 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
33414diff -urNp linux-2.6.32.48/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.48/drivers/media/video/ivtv/ivtv-driver.c
33415--- linux-2.6.32.48/drivers/media/video/ivtv/ivtv-driver.c 2011-11-08 19:02:43.000000000 -0500
33416+++ linux-2.6.32.48/drivers/media/video/ivtv/ivtv-driver.c 2011-11-15 19:59:43.000000000 -0500
33417@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
33418 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
33419
33420 /* ivtv instance counter */
33421-static atomic_t ivtv_instance = ATOMIC_INIT(0);
33422+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
33423
33424 /* Parameter declarations */
33425 static int cardtype[IVTV_MAX_CARDS];
33426diff -urNp linux-2.6.32.48/drivers/media/video/omap24xxcam.c linux-2.6.32.48/drivers/media/video/omap24xxcam.c
33427--- linux-2.6.32.48/drivers/media/video/omap24xxcam.c 2011-11-08 19:02:43.000000000 -0500
33428+++ linux-2.6.32.48/drivers/media/video/omap24xxcam.c 2011-11-15 19:59:43.000000000 -0500
33429@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
33430 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
33431
33432 do_gettimeofday(&vb->ts);
33433- vb->field_count = atomic_add_return(2, &fh->field_count);
33434+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
33435 if (csr & csr_error) {
33436 vb->state = VIDEOBUF_ERROR;
33437 if (!atomic_read(&fh->cam->in_reset)) {
33438diff -urNp linux-2.6.32.48/drivers/media/video/omap24xxcam.h linux-2.6.32.48/drivers/media/video/omap24xxcam.h
33439--- linux-2.6.32.48/drivers/media/video/omap24xxcam.h 2011-11-08 19:02:43.000000000 -0500
33440+++ linux-2.6.32.48/drivers/media/video/omap24xxcam.h 2011-11-15 19:59:43.000000000 -0500
33441@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
33442 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
33443 struct videobuf_queue vbq;
33444 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
33445- atomic_t field_count; /* field counter for videobuf_buffer */
33446+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
33447 /* accessing cam here doesn't need serialisation: it's constant */
33448 struct omap24xxcam_device *cam;
33449 };
33450diff -urNp linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
33451--- linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-08 19:02:43.000000000 -0500
33452+++ linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-15 19:59:43.000000000 -0500
33453@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
33454 u8 *eeprom;
33455 struct tveeprom tvdata;
33456
33457+ pax_track_stack();
33458+
33459 memset(&tvdata,0,sizeof(tvdata));
33460
33461 eeprom = pvr2_eeprom_fetch(hdw);
33462diff -urNp linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33463--- linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-08 19:02:43.000000000 -0500
33464+++ linux-2.6.32.48/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-15 19:59:43.000000000 -0500
33465@@ -195,7 +195,7 @@ struct pvr2_hdw {
33466
33467 /* I2C stuff */
33468 struct i2c_adapter i2c_adap;
33469- struct i2c_algorithm i2c_algo;
33470+ i2c_algorithm_no_const i2c_algo;
33471 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33472 int i2c_cx25840_hack_state;
33473 int i2c_linked;
33474diff -urNp linux-2.6.32.48/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.48/drivers/media/video/saa7134/saa6752hs.c
33475--- linux-2.6.32.48/drivers/media/video/saa7134/saa6752hs.c 2011-11-08 19:02:43.000000000 -0500
33476+++ linux-2.6.32.48/drivers/media/video/saa7134/saa6752hs.c 2011-11-15 19:59:43.000000000 -0500
33477@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
33478 unsigned char localPAT[256];
33479 unsigned char localPMT[256];
33480
33481+ pax_track_stack();
33482+
33483 /* Set video format - must be done first as it resets other settings */
33484 set_reg8(client, 0x41, h->video_format);
33485
33486diff -urNp linux-2.6.32.48/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.48/drivers/media/video/saa7164/saa7164-cmd.c
33487--- linux-2.6.32.48/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-08 19:02:43.000000000 -0500
33488+++ linux-2.6.32.48/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-15 19:59:43.000000000 -0500
33489@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
33490 wait_queue_head_t *q = 0;
33491 dprintk(DBGLVL_CMD, "%s()\n", __func__);
33492
33493+ pax_track_stack();
33494+
33495 /* While any outstand message on the bus exists... */
33496 do {
33497
33498@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
33499 u8 tmp[512];
33500 dprintk(DBGLVL_CMD, "%s()\n", __func__);
33501
33502+ pax_track_stack();
33503+
33504 while (loop) {
33505
33506 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
33507diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.48/drivers/media/video/usbvideo/ibmcam.c
33508--- linux-2.6.32.48/drivers/media/video/usbvideo/ibmcam.c 2011-11-08 19:02:43.000000000 -0500
33509+++ linux-2.6.32.48/drivers/media/video/usbvideo/ibmcam.c 2011-11-15 19:59:43.000000000 -0500
33510@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
33511 static int __init ibmcam_init(void)
33512 {
33513 struct usbvideo_cb cbTbl;
33514- memset(&cbTbl, 0, sizeof(cbTbl));
33515- cbTbl.probe = ibmcam_probe;
33516- cbTbl.setupOnOpen = ibmcam_setup_on_open;
33517- cbTbl.videoStart = ibmcam_video_start;
33518- cbTbl.videoStop = ibmcam_video_stop;
33519- cbTbl.processData = ibmcam_ProcessIsocData;
33520- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
33521- cbTbl.adjustPicture = ibmcam_adjust_picture;
33522- cbTbl.getFPS = ibmcam_calculate_fps;
33523+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
33524+ *(void **)&cbTbl.probe = ibmcam_probe;
33525+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
33526+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
33527+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
33528+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
33529+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
33530+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
33531+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
33532 return usbvideo_register(
33533 &cams,
33534 MAX_IBMCAM,
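struct usbvideo_cb is constified elsewhere in this patch (it gains __no_const in usbvideo.h below), so the driver initializes the callback table by writing each member through its address with a `*(void **)&` cast, and the memset likewise casts away the qualifier. A compressed sketch of the idiom, with a placeholder table:

/* Sketch: install a callback through the field's address.  In the patched
 * tree the const qualification comes from the constify plugin, not from the
 * declaration shown here. */
struct cb_table {
	int (*probe)(void *priv);
};

static int my_probe(void *priv)
{
	return 0;
}

static void fill_table(struct cb_table *t)
{
	*(void **)&t->probe = my_probe;
}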
33535diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.48/drivers/media/video/usbvideo/konicawc.c
33536--- linux-2.6.32.48/drivers/media/video/usbvideo/konicawc.c 2011-11-08 19:02:43.000000000 -0500
33537+++ linux-2.6.32.48/drivers/media/video/usbvideo/konicawc.c 2011-11-15 19:59:43.000000000 -0500
33538@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
33539 int error;
33540
33541 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
33542- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
33543+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
33544
33545 cam->input = input_dev = input_allocate_device();
33546 if (!input_dev) {
33547@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
33548 struct usbvideo_cb cbTbl;
33549 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
33550 DRIVER_DESC "\n");
33551- memset(&cbTbl, 0, sizeof(cbTbl));
33552- cbTbl.probe = konicawc_probe;
33553- cbTbl.setupOnOpen = konicawc_setup_on_open;
33554- cbTbl.processData = konicawc_process_isoc;
33555- cbTbl.getFPS = konicawc_calculate_fps;
33556- cbTbl.setVideoMode = konicawc_set_video_mode;
33557- cbTbl.startDataPump = konicawc_start_data;
33558- cbTbl.stopDataPump = konicawc_stop_data;
33559- cbTbl.adjustPicture = konicawc_adjust_picture;
33560- cbTbl.userFree = konicawc_free_uvd;
33561+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
33562+ *(void **)&cbTbl.probe = konicawc_probe;
33563+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
33564+ *(void **)&cbTbl.processData = konicawc_process_isoc;
33565+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
33566+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
33567+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
33568+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
33569+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
33570+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
33571 return usbvideo_register(
33572 &cams,
33573 MAX_CAMERAS,
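The strncat() to strlcat() change above fixes a classic misuse: strncat()'s size argument limits the number of bytes appended, not the total size of the destination, so passing sizeof(buffer) can still run past the end when the buffer is nearly full. strlcat() takes the full destination size and always NUL-terminates:

#include <linux/string.h>

/* Sketch: append a fixed suffix without overrunning the destination. */
static void append_input_suffix(char *physname, size_t size)
{
	/* wrong: strncat(physname, "/input0", size) only bounds the append */
	strlcat(physname, "/input0", size);
}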
33574diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.48/drivers/media/video/usbvideo/quickcam_messenger.c
33575--- linux-2.6.32.48/drivers/media/video/usbvideo/quickcam_messenger.c 2011-11-08 19:02:43.000000000 -0500
33576+++ linux-2.6.32.48/drivers/media/video/usbvideo/quickcam_messenger.c 2011-11-15 19:59:43.000000000 -0500
33577@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
33578 int error;
33579
33580 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
33581- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
33582+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
33583
33584 cam->input = input_dev = input_allocate_device();
33585 if (!input_dev) {
33586diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.48/drivers/media/video/usbvideo/ultracam.c
33587--- linux-2.6.32.48/drivers/media/video/usbvideo/ultracam.c 2011-11-08 19:02:43.000000000 -0500
33588+++ linux-2.6.32.48/drivers/media/video/usbvideo/ultracam.c 2011-11-15 19:59:43.000000000 -0500
33589@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
33590 {
33591 struct usbvideo_cb cbTbl;
33592 memset(&cbTbl, 0, sizeof(cbTbl));
33593- cbTbl.probe = ultracam_probe;
33594- cbTbl.setupOnOpen = ultracam_setup_on_open;
33595- cbTbl.videoStart = ultracam_video_start;
33596- cbTbl.videoStop = ultracam_video_stop;
33597- cbTbl.processData = ultracam_ProcessIsocData;
33598- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
33599- cbTbl.adjustPicture = ultracam_adjust_picture;
33600- cbTbl.getFPS = ultracam_calculate_fps;
33601+ *(void **)&cbTbl.probe = ultracam_probe;
33602+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
33603+ *(void **)&cbTbl.videoStart = ultracam_video_start;
33604+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
33605+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
33606+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
33607+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
33608+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
33609 return usbvideo_register(
33610 &cams,
33611 MAX_CAMERAS,
33612diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.c
33613--- linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.c 2011-11-08 19:02:43.000000000 -0500
33614+++ linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.c 2011-11-15 19:59:43.000000000 -0500
33615@@ -697,15 +697,15 @@ int usbvideo_register(
33616 __func__, cams, base_size, num_cams);
33617
33618 /* Copy callbacks, apply defaults for those that are not set */
33619- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
33620+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
33621 if (cams->cb.getFrame == NULL)
33622- cams->cb.getFrame = usbvideo_GetFrame;
33623+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
33624 if (cams->cb.disconnect == NULL)
33625- cams->cb.disconnect = usbvideo_Disconnect;
33626+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
33627 if (cams->cb.startDataPump == NULL)
33628- cams->cb.startDataPump = usbvideo_StartDataPump;
33629+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
33630 if (cams->cb.stopDataPump == NULL)
33631- cams->cb.stopDataPump = usbvideo_StopDataPump;
33632+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
33633
33634 cams->num_cameras = num_cams;
33635 cams->cam = (struct uvd *) &cams[1];
33636diff -urNp linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.h linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.h
33637--- linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.h 2011-11-08 19:02:43.000000000 -0500
33638+++ linux-2.6.32.48/drivers/media/video/usbvideo/usbvideo.h 2011-11-15 19:59:43.000000000 -0500
33639@@ -268,7 +268,7 @@ struct usbvideo_cb {
33640 int (*startDataPump)(struct uvd *uvd);
33641 void (*stopDataPump)(struct uvd *uvd);
33642 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
33643-};
33644+} __no_const;
33645
33646 struct usbvideo {
33647 int num_cameras; /* As allocated */
33648diff -urNp linux-2.6.32.48/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.48/drivers/media/video/usbvision/usbvision-core.c
33649--- linux-2.6.32.48/drivers/media/video/usbvision/usbvision-core.c 2011-11-08 19:02:43.000000000 -0500
33650+++ linux-2.6.32.48/drivers/media/video/usbvision/usbvision-core.c 2011-11-15 19:59:43.000000000 -0500
33651@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
33652 unsigned char rv, gv, bv;
33653 static unsigned char *Y, *U, *V;
33654
33655+ pax_track_stack();
33656+
33657 frame = usbvision->curFrame;
33658 imageSize = frame->frmwidth * frame->frmheight;
33659 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
33660diff -urNp linux-2.6.32.48/drivers/media/video/v4l2-device.c linux-2.6.32.48/drivers/media/video/v4l2-device.c
33661--- linux-2.6.32.48/drivers/media/video/v4l2-device.c 2011-11-08 19:02:43.000000000 -0500
33662+++ linux-2.6.32.48/drivers/media/video/v4l2-device.c 2011-11-15 19:59:43.000000000 -0500
33663@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
33664 EXPORT_SYMBOL_GPL(v4l2_device_register);
33665
33666 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
33667- atomic_t *instance)
33668+ atomic_unchecked_t *instance)
33669 {
33670- int num = atomic_inc_return(instance) - 1;
33671+ int num = atomic_inc_return_unchecked(instance) - 1;
33672 int len = strlen(basename);
33673
33674 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
33675diff -urNp linux-2.6.32.48/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.48/drivers/media/video/videobuf-dma-sg.c
33676--- linux-2.6.32.48/drivers/media/video/videobuf-dma-sg.c 2011-11-08 19:02:43.000000000 -0500
33677+++ linux-2.6.32.48/drivers/media/video/videobuf-dma-sg.c 2011-11-15 19:59:43.000000000 -0500
33678@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
33679 {
33680 struct videobuf_queue q;
33681
33682+ pax_track_stack();
33683+
33684 /* Required to make generic handler to call __videobuf_alloc */
33685 q.int_ops = &sg_ops;
33686
33687diff -urNp linux-2.6.32.48/drivers/message/fusion/mptbase.c linux-2.6.32.48/drivers/message/fusion/mptbase.c
33688--- linux-2.6.32.48/drivers/message/fusion/mptbase.c 2011-11-08 19:02:43.000000000 -0500
33689+++ linux-2.6.32.48/drivers/message/fusion/mptbase.c 2011-11-15 19:59:43.000000000 -0500
33690@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
33691 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33692 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33693
33694+#ifdef CONFIG_GRKERNSEC_HIDESYM
33695+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33696+ NULL, NULL);
33697+#else
33698 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33699 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33700+#endif
33701+
33702 /*
33703 * Rounding UP to nearest 4-kB boundary here...
33704 */
33705diff -urNp linux-2.6.32.48/drivers/message/fusion/mptsas.c linux-2.6.32.48/drivers/message/fusion/mptsas.c
33706--- linux-2.6.32.48/drivers/message/fusion/mptsas.c 2011-11-08 19:02:43.000000000 -0500
33707+++ linux-2.6.32.48/drivers/message/fusion/mptsas.c 2011-11-15 19:59:43.000000000 -0500
33708@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
33709 return 0;
33710 }
33711
33712+static inline void
33713+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33714+{
33715+ if (phy_info->port_details) {
33716+ phy_info->port_details->rphy = rphy;
33717+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33718+ ioc->name, rphy));
33719+ }
33720+
33721+ if (rphy) {
33722+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33723+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33724+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33725+ ioc->name, rphy, rphy->dev.release));
33726+ }
33727+}
33728+
33729 /* no mutex */
33730 static void
33731 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33732@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
33733 return NULL;
33734 }
33735
33736-static inline void
33737-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33738-{
33739- if (phy_info->port_details) {
33740- phy_info->port_details->rphy = rphy;
33741- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33742- ioc->name, rphy));
33743- }
33744-
33745- if (rphy) {
33746- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33747- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33748- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33749- ioc->name, rphy, rphy->dev.release));
33750- }
33751-}
33752-
33753 static inline struct sas_port *
33754 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33755 {
33756diff -urNp linux-2.6.32.48/drivers/message/fusion/mptscsih.c linux-2.6.32.48/drivers/message/fusion/mptscsih.c
33757--- linux-2.6.32.48/drivers/message/fusion/mptscsih.c 2011-11-08 19:02:43.000000000 -0500
33758+++ linux-2.6.32.48/drivers/message/fusion/mptscsih.c 2011-11-15 19:59:43.000000000 -0500
33759@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33760
33761 h = shost_priv(SChost);
33762
33763- if (h) {
33764- if (h->info_kbuf == NULL)
33765- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33766- return h->info_kbuf;
33767- h->info_kbuf[0] = '\0';
33768+ if (!h)
33769+ return NULL;
33770
33771- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33772- h->info_kbuf[size-1] = '\0';
33773- }
33774+ if (h->info_kbuf == NULL)
33775+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33776+ return h->info_kbuf;
33777+ h->info_kbuf[0] = '\0';
33778+
33779+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33780+ h->info_kbuf[size-1] = '\0';
33781
33782 return h->info_kbuf;
33783 }
33784diff -urNp linux-2.6.32.48/drivers/message/i2o/i2o_config.c linux-2.6.32.48/drivers/message/i2o/i2o_config.c
33785--- linux-2.6.32.48/drivers/message/i2o/i2o_config.c 2011-11-08 19:02:43.000000000 -0500
33786+++ linux-2.6.32.48/drivers/message/i2o/i2o_config.c 2011-11-15 19:59:43.000000000 -0500
33787@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
33788 struct i2o_message *msg;
33789 unsigned int iop;
33790
33791+ pax_track_stack();
33792+
33793 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
33794 return -EFAULT;
33795
33796diff -urNp linux-2.6.32.48/drivers/message/i2o/i2o_proc.c linux-2.6.32.48/drivers/message/i2o/i2o_proc.c
33797--- linux-2.6.32.48/drivers/message/i2o/i2o_proc.c 2011-11-08 19:02:43.000000000 -0500
33798+++ linux-2.6.32.48/drivers/message/i2o/i2o_proc.c 2011-11-15 19:59:43.000000000 -0500
33799@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
33800 "Array Controller Device"
33801 };
33802
33803-static char *chtostr(u8 * chars, int n)
33804-{
33805- char tmp[256];
33806- tmp[0] = 0;
33807- return strncat(tmp, (char *)chars, n);
33808-}
33809-
33810 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33811 char *group)
33812 {
33813@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
33814
33815 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33816 seq_printf(seq, "%-#8x", ddm_table.module_id);
33817- seq_printf(seq, "%-29s",
33818- chtostr(ddm_table.module_name_version, 28));
33819+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33820 seq_printf(seq, "%9d ", ddm_table.data_size);
33821 seq_printf(seq, "%8d", ddm_table.code_size);
33822
33823@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
33824
33825 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33826 seq_printf(seq, "%-#8x", dst->module_id);
33827- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33828- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33829+ seq_printf(seq, "%-.28s", dst->module_name_version);
33830+ seq_printf(seq, "%-.8s", dst->date);
33831 seq_printf(seq, "%8d ", dst->module_size);
33832 seq_printf(seq, "%8d ", dst->mpb_size);
33833 seq_printf(seq, "0x%04x", dst->module_flags);
33834@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
33835 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33836 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33837 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33838- seq_printf(seq, "Vendor info : %s\n",
33839- chtostr((u8 *) (work32 + 2), 16));
33840- seq_printf(seq, "Product info : %s\n",
33841- chtostr((u8 *) (work32 + 6), 16));
33842- seq_printf(seq, "Description : %s\n",
33843- chtostr((u8 *) (work32 + 10), 16));
33844- seq_printf(seq, "Product rev. : %s\n",
33845- chtostr((u8 *) (work32 + 14), 8));
33846+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33847+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33848+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33849+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33850
33851 seq_printf(seq, "Serial number : ");
33852 print_serial_number(seq, (u8 *) (work32 + 16),
33853@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
33854 }
33855
33856 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33857- seq_printf(seq, "Module name : %s\n",
33858- chtostr(result.module_name, 24));
33859- seq_printf(seq, "Module revision : %s\n",
33860- chtostr(result.module_rev, 8));
33861+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
33862+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33863
33864 seq_printf(seq, "Serial number : ");
33865 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33866@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
33867 return 0;
33868 }
33869
33870- seq_printf(seq, "Device name : %s\n",
33871- chtostr(result.device_name, 64));
33872- seq_printf(seq, "Service name : %s\n",
33873- chtostr(result.service_name, 64));
33874- seq_printf(seq, "Physical name : %s\n",
33875- chtostr(result.physical_location, 64));
33876- seq_printf(seq, "Instance number : %s\n",
33877- chtostr(result.instance_number, 4));
33878+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
33879+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
33880+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33881+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33882
33883 return 0;
33884 }
33885diff -urNp linux-2.6.32.48/drivers/message/i2o/iop.c linux-2.6.32.48/drivers/message/i2o/iop.c
33886--- linux-2.6.32.48/drivers/message/i2o/iop.c 2011-11-08 19:02:43.000000000 -0500
33887+++ linux-2.6.32.48/drivers/message/i2o/iop.c 2011-11-15 19:59:43.000000000 -0500
33888@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
33889
33890 spin_lock_irqsave(&c->context_list_lock, flags);
33891
33892- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33893- atomic_inc(&c->context_list_counter);
33894+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33895+ atomic_inc_unchecked(&c->context_list_counter);
33896
33897- entry->context = atomic_read(&c->context_list_counter);
33898+ entry->context = atomic_read_unchecked(&c->context_list_counter);
33899
33900 list_add(&entry->list, &c->context_list);
33901
33902@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
33903
33904 #if BITS_PER_LONG == 64
33905 spin_lock_init(&c->context_list_lock);
33906- atomic_set(&c->context_list_counter, 0);
33907+ atomic_set_unchecked(&c->context_list_counter, 0);
33908 INIT_LIST_HEAD(&c->context_list);
33909 #endif
33910
33911diff -urNp linux-2.6.32.48/drivers/mfd/wm8350-i2c.c linux-2.6.32.48/drivers/mfd/wm8350-i2c.c
33912--- linux-2.6.32.48/drivers/mfd/wm8350-i2c.c 2011-11-08 19:02:43.000000000 -0500
33913+++ linux-2.6.32.48/drivers/mfd/wm8350-i2c.c 2011-11-15 19:59:43.000000000 -0500
33914@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
33915 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
33916 int ret;
33917
33918+ pax_track_stack();
33919+
33920 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
33921 return -EINVAL;
33922
33923diff -urNp linux-2.6.32.48/drivers/misc/kgdbts.c linux-2.6.32.48/drivers/misc/kgdbts.c
33924--- linux-2.6.32.48/drivers/misc/kgdbts.c 2011-11-08 19:02:43.000000000 -0500
33925+++ linux-2.6.32.48/drivers/misc/kgdbts.c 2011-11-15 19:59:43.000000000 -0500
33926@@ -118,7 +118,7 @@
33927 } while (0)
33928 #define MAX_CONFIG_LEN 40
33929
33930-static struct kgdb_io kgdbts_io_ops;
33931+static const struct kgdb_io kgdbts_io_ops;
33932 static char get_buf[BUFMAX];
33933 static int get_buf_cnt;
33934 static char put_buf[BUFMAX];
33935@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
33936 module_put(THIS_MODULE);
33937 }
33938
33939-static struct kgdb_io kgdbts_io_ops = {
33940+static const struct kgdb_io kgdbts_io_ops = {
33941 .name = "kgdbts",
33942 .read_char = kgdbts_get_char,
33943 .write_char = kgdbts_put_char,
33944diff -urNp linux-2.6.32.48/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.48/drivers/misc/sgi-gru/gruhandles.c
33945--- linux-2.6.32.48/drivers/misc/sgi-gru/gruhandles.c 2011-11-08 19:02:43.000000000 -0500
33946+++ linux-2.6.32.48/drivers/misc/sgi-gru/gruhandles.c 2011-11-15 19:59:43.000000000 -0500
33947@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
33948
33949 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33950 {
33951- atomic_long_inc(&mcs_op_statistics[op].count);
33952- atomic_long_add(clks, &mcs_op_statistics[op].total);
33953+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33954+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
33955 if (mcs_op_statistics[op].max < clks)
33956 mcs_op_statistics[op].max = clks;
33957 }
33958diff -urNp linux-2.6.32.48/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.48/drivers/misc/sgi-gru/gruprocfs.c
33959--- linux-2.6.32.48/drivers/misc/sgi-gru/gruprocfs.c 2011-11-08 19:02:43.000000000 -0500
33960+++ linux-2.6.32.48/drivers/misc/sgi-gru/gruprocfs.c 2011-11-15 19:59:43.000000000 -0500
33961@@ -32,9 +32,9 @@
33962
33963 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33964
33965-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33966+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33967 {
33968- unsigned long val = atomic_long_read(v);
33969+ unsigned long val = atomic_long_read_unchecked(v);
33970
33971 if (val)
33972 seq_printf(s, "%16lu %s\n", val, id);
33973@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
33974 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
33975
33976 for (op = 0; op < mcsop_last; op++) {
33977- count = atomic_long_read(&mcs_op_statistics[op].count);
33978- total = atomic_long_read(&mcs_op_statistics[op].total);
33979+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33980+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33981 max = mcs_op_statistics[op].max;
33982 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33983 count ? total / count : 0, max);
33984diff -urNp linux-2.6.32.48/drivers/misc/sgi-gru/grutables.h linux-2.6.32.48/drivers/misc/sgi-gru/grutables.h
33985--- linux-2.6.32.48/drivers/misc/sgi-gru/grutables.h 2011-11-08 19:02:43.000000000 -0500
33986+++ linux-2.6.32.48/drivers/misc/sgi-gru/grutables.h 2011-11-15 19:59:43.000000000 -0500
33987@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33988 * GRU statistics.
33989 */
33990 struct gru_stats_s {
33991- atomic_long_t vdata_alloc;
33992- atomic_long_t vdata_free;
33993- atomic_long_t gts_alloc;
33994- atomic_long_t gts_free;
33995- atomic_long_t vdata_double_alloc;
33996- atomic_long_t gts_double_allocate;
33997- atomic_long_t assign_context;
33998- atomic_long_t assign_context_failed;
33999- atomic_long_t free_context;
34000- atomic_long_t load_user_context;
34001- atomic_long_t load_kernel_context;
34002- atomic_long_t lock_kernel_context;
34003- atomic_long_t unlock_kernel_context;
34004- atomic_long_t steal_user_context;
34005- atomic_long_t steal_kernel_context;
34006- atomic_long_t steal_context_failed;
34007- atomic_long_t nopfn;
34008- atomic_long_t break_cow;
34009- atomic_long_t asid_new;
34010- atomic_long_t asid_next;
34011- atomic_long_t asid_wrap;
34012- atomic_long_t asid_reuse;
34013- atomic_long_t intr;
34014- atomic_long_t intr_mm_lock_failed;
34015- atomic_long_t call_os;
34016- atomic_long_t call_os_offnode_reference;
34017- atomic_long_t call_os_check_for_bug;
34018- atomic_long_t call_os_wait_queue;
34019- atomic_long_t user_flush_tlb;
34020- atomic_long_t user_unload_context;
34021- atomic_long_t user_exception;
34022- atomic_long_t set_context_option;
34023- atomic_long_t migrate_check;
34024- atomic_long_t migrated_retarget;
34025- atomic_long_t migrated_unload;
34026- atomic_long_t migrated_unload_delay;
34027- atomic_long_t migrated_nopfn_retarget;
34028- atomic_long_t migrated_nopfn_unload;
34029- atomic_long_t tlb_dropin;
34030- atomic_long_t tlb_dropin_fail_no_asid;
34031- atomic_long_t tlb_dropin_fail_upm;
34032- atomic_long_t tlb_dropin_fail_invalid;
34033- atomic_long_t tlb_dropin_fail_range_active;
34034- atomic_long_t tlb_dropin_fail_idle;
34035- atomic_long_t tlb_dropin_fail_fmm;
34036- atomic_long_t tlb_dropin_fail_no_exception;
34037- atomic_long_t tlb_dropin_fail_no_exception_war;
34038- atomic_long_t tfh_stale_on_fault;
34039- atomic_long_t mmu_invalidate_range;
34040- atomic_long_t mmu_invalidate_page;
34041- atomic_long_t mmu_clear_flush_young;
34042- atomic_long_t flush_tlb;
34043- atomic_long_t flush_tlb_gru;
34044- atomic_long_t flush_tlb_gru_tgh;
34045- atomic_long_t flush_tlb_gru_zero_asid;
34046-
34047- atomic_long_t copy_gpa;
34048-
34049- atomic_long_t mesq_receive;
34050- atomic_long_t mesq_receive_none;
34051- atomic_long_t mesq_send;
34052- atomic_long_t mesq_send_failed;
34053- atomic_long_t mesq_noop;
34054- atomic_long_t mesq_send_unexpected_error;
34055- atomic_long_t mesq_send_lb_overflow;
34056- atomic_long_t mesq_send_qlimit_reached;
34057- atomic_long_t mesq_send_amo_nacked;
34058- atomic_long_t mesq_send_put_nacked;
34059- atomic_long_t mesq_qf_not_full;
34060- atomic_long_t mesq_qf_locked;
34061- atomic_long_t mesq_qf_noop_not_full;
34062- atomic_long_t mesq_qf_switch_head_failed;
34063- atomic_long_t mesq_qf_unexpected_error;
34064- atomic_long_t mesq_noop_unexpected_error;
34065- atomic_long_t mesq_noop_lb_overflow;
34066- atomic_long_t mesq_noop_qlimit_reached;
34067- atomic_long_t mesq_noop_amo_nacked;
34068- atomic_long_t mesq_noop_put_nacked;
34069+ atomic_long_unchecked_t vdata_alloc;
34070+ atomic_long_unchecked_t vdata_free;
34071+ atomic_long_unchecked_t gts_alloc;
34072+ atomic_long_unchecked_t gts_free;
34073+ atomic_long_unchecked_t vdata_double_alloc;
34074+ atomic_long_unchecked_t gts_double_allocate;
34075+ atomic_long_unchecked_t assign_context;
34076+ atomic_long_unchecked_t assign_context_failed;
34077+ atomic_long_unchecked_t free_context;
34078+ atomic_long_unchecked_t load_user_context;
34079+ atomic_long_unchecked_t load_kernel_context;
34080+ atomic_long_unchecked_t lock_kernel_context;
34081+ atomic_long_unchecked_t unlock_kernel_context;
34082+ atomic_long_unchecked_t steal_user_context;
34083+ atomic_long_unchecked_t steal_kernel_context;
34084+ atomic_long_unchecked_t steal_context_failed;
34085+ atomic_long_unchecked_t nopfn;
34086+ atomic_long_unchecked_t break_cow;
34087+ atomic_long_unchecked_t asid_new;
34088+ atomic_long_unchecked_t asid_next;
34089+ atomic_long_unchecked_t asid_wrap;
34090+ atomic_long_unchecked_t asid_reuse;
34091+ atomic_long_unchecked_t intr;
34092+ atomic_long_unchecked_t intr_mm_lock_failed;
34093+ atomic_long_unchecked_t call_os;
34094+ atomic_long_unchecked_t call_os_offnode_reference;
34095+ atomic_long_unchecked_t call_os_check_for_bug;
34096+ atomic_long_unchecked_t call_os_wait_queue;
34097+ atomic_long_unchecked_t user_flush_tlb;
34098+ atomic_long_unchecked_t user_unload_context;
34099+ atomic_long_unchecked_t user_exception;
34100+ atomic_long_unchecked_t set_context_option;
34101+ atomic_long_unchecked_t migrate_check;
34102+ atomic_long_unchecked_t migrated_retarget;
34103+ atomic_long_unchecked_t migrated_unload;
34104+ atomic_long_unchecked_t migrated_unload_delay;
34105+ atomic_long_unchecked_t migrated_nopfn_retarget;
34106+ atomic_long_unchecked_t migrated_nopfn_unload;
34107+ atomic_long_unchecked_t tlb_dropin;
34108+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34109+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34110+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34111+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34112+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34113+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34114+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34115+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
34116+ atomic_long_unchecked_t tfh_stale_on_fault;
34117+ atomic_long_unchecked_t mmu_invalidate_range;
34118+ atomic_long_unchecked_t mmu_invalidate_page;
34119+ atomic_long_unchecked_t mmu_clear_flush_young;
34120+ atomic_long_unchecked_t flush_tlb;
34121+ atomic_long_unchecked_t flush_tlb_gru;
34122+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34123+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34124+
34125+ atomic_long_unchecked_t copy_gpa;
34126+
34127+ atomic_long_unchecked_t mesq_receive;
34128+ atomic_long_unchecked_t mesq_receive_none;
34129+ atomic_long_unchecked_t mesq_send;
34130+ atomic_long_unchecked_t mesq_send_failed;
34131+ atomic_long_unchecked_t mesq_noop;
34132+ atomic_long_unchecked_t mesq_send_unexpected_error;
34133+ atomic_long_unchecked_t mesq_send_lb_overflow;
34134+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34135+ atomic_long_unchecked_t mesq_send_amo_nacked;
34136+ atomic_long_unchecked_t mesq_send_put_nacked;
34137+ atomic_long_unchecked_t mesq_qf_not_full;
34138+ atomic_long_unchecked_t mesq_qf_locked;
34139+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34140+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34141+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34142+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34143+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34144+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34145+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34146+ atomic_long_unchecked_t mesq_noop_put_nacked;
34147
34148 };
34149
34150@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
34151 cchop_deallocate, tghop_invalidate, mcsop_last};
34152
34153 struct mcs_op_statistic {
34154- atomic_long_t count;
34155- atomic_long_t total;
34156+ atomic_long_unchecked_t count;
34157+ atomic_long_unchecked_t total;
34158 unsigned long max;
34159 };
34160
34161@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
34162
34163 #define STAT(id) do { \
34164 if (gru_options & OPT_STATS) \
34165- atomic_long_inc(&gru_stats.id); \
34166+ atomic_long_inc_unchecked(&gru_stats.id); \
34167 } while (0)
34168
34169 #ifdef CONFIG_SGI_GRU_DEBUG
34170diff -urNp linux-2.6.32.48/drivers/misc/sgi-xp/xpc.h linux-2.6.32.48/drivers/misc/sgi-xp/xpc.h
34171--- linux-2.6.32.48/drivers/misc/sgi-xp/xpc.h 2011-11-08 19:02:43.000000000 -0500
34172+++ linux-2.6.32.48/drivers/misc/sgi-xp/xpc.h 2011-11-15 19:59:43.000000000 -0500
34173@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
34174 /* found in xpc_main.c */
34175 extern struct device *xpc_part;
34176 extern struct device *xpc_chan;
34177-extern struct xpc_arch_operations xpc_arch_ops;
34178+extern const struct xpc_arch_operations xpc_arch_ops;
34179 extern int xpc_disengage_timelimit;
34180 extern int xpc_disengage_timedout;
34181 extern int xpc_activate_IRQ_rcvd;
34182diff -urNp linux-2.6.32.48/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.48/drivers/misc/sgi-xp/xpc_main.c
34183--- linux-2.6.32.48/drivers/misc/sgi-xp/xpc_main.c 2011-11-08 19:02:43.000000000 -0500
34184+++ linux-2.6.32.48/drivers/misc/sgi-xp/xpc_main.c 2011-11-15 19:59:43.000000000 -0500
34185@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
34186 .notifier_call = xpc_system_die,
34187 };
34188
34189-struct xpc_arch_operations xpc_arch_ops;
34190+const struct xpc_arch_operations xpc_arch_ops;
34191
34192 /*
34193 * Timer function to enforce the timelimit on the partition disengage.
34194diff -urNp linux-2.6.32.48/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.48/drivers/misc/sgi-xp/xpc_sn2.c
34195--- linux-2.6.32.48/drivers/misc/sgi-xp/xpc_sn2.c 2011-11-08 19:02:43.000000000 -0500
34196+++ linux-2.6.32.48/drivers/misc/sgi-xp/xpc_sn2.c 2011-11-15 19:59:43.000000000 -0500
34197@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
34198 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
34199 }
34200
34201-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
34202+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
34203 .setup_partitions = xpc_setup_partitions_sn2,
34204 .teardown_partitions = xpc_teardown_partitions_sn2,
34205 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
34206@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
34207 int ret;
34208 size_t buf_size;
34209
34210- xpc_arch_ops = xpc_arch_ops_sn2;
34211+ pax_open_kernel();
34212+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
34213+ pax_close_kernel();
34214
34215 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
34216 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
34217diff -urNp linux-2.6.32.48/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.48/drivers/misc/sgi-xp/xpc_uv.c
34218--- linux-2.6.32.48/drivers/misc/sgi-xp/xpc_uv.c 2011-11-08 19:02:43.000000000 -0500
34219+++ linux-2.6.32.48/drivers/misc/sgi-xp/xpc_uv.c 2011-11-15 19:59:43.000000000 -0500
34220@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
34221 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
34222 }
34223
34224-static struct xpc_arch_operations xpc_arch_ops_uv = {
34225+static const struct xpc_arch_operations xpc_arch_ops_uv = {
34226 .setup_partitions = xpc_setup_partitions_uv,
34227 .teardown_partitions = xpc_teardown_partitions_uv,
34228 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
34229@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
34230 int
34231 xpc_init_uv(void)
34232 {
34233- xpc_arch_ops = xpc_arch_ops_uv;
34234+ pax_open_kernel();
34235+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
34236+ pax_close_kernel();
34237
34238 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
34239 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
34240diff -urNp linux-2.6.32.48/drivers/misc/sgi-xp/xp.h linux-2.6.32.48/drivers/misc/sgi-xp/xp.h
34241--- linux-2.6.32.48/drivers/misc/sgi-xp/xp.h 2011-11-08 19:02:43.000000000 -0500
34242+++ linux-2.6.32.48/drivers/misc/sgi-xp/xp.h 2011-11-15 19:59:43.000000000 -0500
34243@@ -289,7 +289,7 @@ struct xpc_interface {
34244 xpc_notify_func, void *);
34245 void (*received) (short, int, void *);
34246 enum xp_retval (*partid_to_nasids) (short, void *);
34247-};
34248+} __no_const;
34249
34250 extern struct xpc_interface xpc_interface;
34251
34252diff -urNp linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0001.c
34253--- linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-08 19:02:43.000000000 -0500
34254+++ linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-15 19:59:43.000000000 -0500
34255@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
34256 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
34257 unsigned long timeo = jiffies + HZ;
34258
34259+ pax_track_stack();
34260+
34261 /* Prevent setting state FL_SYNCING for chip in suspended state. */
34262 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
34263 goto sleep;
34264@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
34265 unsigned long initial_adr;
34266 int initial_len = len;
34267
34268+ pax_track_stack();
34269+
34270 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
34271 adr += chip->start;
34272 initial_adr = adr;
34273@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
34274 int retries = 3;
34275 int ret;
34276
34277+ pax_track_stack();
34278+
34279 adr += chip->start;
34280
34281 retry:
34282diff -urNp linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0020.c
34283--- linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-08 19:02:43.000000000 -0500
34284+++ linux-2.6.32.48/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-15 19:59:43.000000000 -0500
34285@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
34286 unsigned long cmd_addr;
34287 struct cfi_private *cfi = map->fldrv_priv;
34288
34289+ pax_track_stack();
34290+
34291 adr += chip->start;
34292
34293 /* Ensure cmd read/writes are aligned. */
34294@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
34295 DECLARE_WAITQUEUE(wait, current);
34296 int wbufsize, z;
34297
34298+ pax_track_stack();
34299+
34300 /* M58LW064A requires bus alignment for buffer wriets -- saw */
34301 if (adr & (map_bankwidth(map)-1))
34302 return -EINVAL;
34303@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
34304 DECLARE_WAITQUEUE(wait, current);
34305 int ret = 0;
34306
34307+ pax_track_stack();
34308+
34309 adr += chip->start;
34310
34311 /* Let's determine this according to the interleave only once */
34312@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
34313 unsigned long timeo = jiffies + HZ;
34314 DECLARE_WAITQUEUE(wait, current);
34315
34316+ pax_track_stack();
34317+
34318 adr += chip->start;
34319
34320 /* Let's determine this according to the interleave only once */
34321@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
34322 unsigned long timeo = jiffies + HZ;
34323 DECLARE_WAITQUEUE(wait, current);
34324
34325+ pax_track_stack();
34326+
34327 adr += chip->start;
34328
34329 /* Let's determine this according to the interleave only once */
34330diff -urNp linux-2.6.32.48/drivers/mtd/devices/doc2000.c linux-2.6.32.48/drivers/mtd/devices/doc2000.c
34331--- linux-2.6.32.48/drivers/mtd/devices/doc2000.c 2011-11-08 19:02:43.000000000 -0500
34332+++ linux-2.6.32.48/drivers/mtd/devices/doc2000.c 2011-11-15 19:59:43.000000000 -0500
34333@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
34334
34335 /* The ECC will not be calculated correctly if less than 512 is written */
34336 /* DBB-
34337- if (len != 0x200 && eccbuf)
34338+ if (len != 0x200)
34339 printk(KERN_WARNING
34340 "ECC needs a full sector write (adr: %lx size %lx)\n",
34341 (long) to, (long) len);
34342diff -urNp linux-2.6.32.48/drivers/mtd/devices/doc2001.c linux-2.6.32.48/drivers/mtd/devices/doc2001.c
34343--- linux-2.6.32.48/drivers/mtd/devices/doc2001.c 2011-11-08 19:02:43.000000000 -0500
34344+++ linux-2.6.32.48/drivers/mtd/devices/doc2001.c 2011-11-15 19:59:43.000000000 -0500
34345@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
34346 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
34347
34348 /* Don't allow read past end of device */
34349- if (from >= this->totlen)
34350+ if (from >= this->totlen || !len)
34351 return -EINVAL;
34352
34353 /* Don't allow a single read to cross a 512-byte block boundary */
34354diff -urNp linux-2.6.32.48/drivers/mtd/ftl.c linux-2.6.32.48/drivers/mtd/ftl.c
34355--- linux-2.6.32.48/drivers/mtd/ftl.c 2011-11-08 19:02:43.000000000 -0500
34356+++ linux-2.6.32.48/drivers/mtd/ftl.c 2011-11-15 19:59:43.000000000 -0500
34357@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
34358 loff_t offset;
34359 uint16_t srcunitswap = cpu_to_le16(srcunit);
34360
34361+ pax_track_stack();
34362+
34363 eun = &part->EUNInfo[srcunit];
34364 xfer = &part->XferInfo[xferunit];
34365 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
34366diff -urNp linux-2.6.32.48/drivers/mtd/inftlcore.c linux-2.6.32.48/drivers/mtd/inftlcore.c
34367--- linux-2.6.32.48/drivers/mtd/inftlcore.c 2011-11-08 19:02:43.000000000 -0500
34368+++ linux-2.6.32.48/drivers/mtd/inftlcore.c 2011-11-15 19:59:43.000000000 -0500
34369@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
34370 struct inftl_oob oob;
34371 size_t retlen;
34372
34373+ pax_track_stack();
34374+
34375 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
34376 "pending=%d)\n", inftl, thisVUC, pendingblock);
34377
34378diff -urNp linux-2.6.32.48/drivers/mtd/inftlmount.c linux-2.6.32.48/drivers/mtd/inftlmount.c
34379--- linux-2.6.32.48/drivers/mtd/inftlmount.c 2011-11-08 19:02:43.000000000 -0500
34380+++ linux-2.6.32.48/drivers/mtd/inftlmount.c 2011-11-15 19:59:43.000000000 -0500
34381@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
34382 struct INFTLPartition *ip;
34383 size_t retlen;
34384
34385+ pax_track_stack();
34386+
34387 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
34388
34389 /*
34390diff -urNp linux-2.6.32.48/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.48/drivers/mtd/lpddr/qinfo_probe.c
34391--- linux-2.6.32.48/drivers/mtd/lpddr/qinfo_probe.c 2011-11-08 19:02:43.000000000 -0500
34392+++ linux-2.6.32.48/drivers/mtd/lpddr/qinfo_probe.c 2011-11-15 19:59:43.000000000 -0500
34393@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
34394 {
34395 map_word pfow_val[4];
34396
34397+ pax_track_stack();
34398+
34399 /* Check identification string */
34400 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
34401 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
34402diff -urNp linux-2.6.32.48/drivers/mtd/mtdchar.c linux-2.6.32.48/drivers/mtd/mtdchar.c
34403--- linux-2.6.32.48/drivers/mtd/mtdchar.c 2011-11-08 19:02:43.000000000 -0500
34404+++ linux-2.6.32.48/drivers/mtd/mtdchar.c 2011-11-15 19:59:43.000000000 -0500
34405@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
34406 u_long size;
34407 struct mtd_info_user info;
34408
34409+ pax_track_stack();
34410+
34411 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
34412
34413 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
34414diff -urNp linux-2.6.32.48/drivers/mtd/nftlcore.c linux-2.6.32.48/drivers/mtd/nftlcore.c
34415--- linux-2.6.32.48/drivers/mtd/nftlcore.c 2011-11-08 19:02:43.000000000 -0500
34416+++ linux-2.6.32.48/drivers/mtd/nftlcore.c 2011-11-15 19:59:43.000000000 -0500
34417@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
34418 int inplace = 1;
34419 size_t retlen;
34420
34421+ pax_track_stack();
34422+
34423 memset(BlockMap, 0xff, sizeof(BlockMap));
34424 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
34425
34426diff -urNp linux-2.6.32.48/drivers/mtd/nftlmount.c linux-2.6.32.48/drivers/mtd/nftlmount.c
34427--- linux-2.6.32.48/drivers/mtd/nftlmount.c 2011-11-08 19:02:43.000000000 -0500
34428+++ linux-2.6.32.48/drivers/mtd/nftlmount.c 2011-11-15 19:59:43.000000000 -0500
34429@@ -23,6 +23,7 @@
34430 #include <asm/errno.h>
34431 #include <linux/delay.h>
34432 #include <linux/slab.h>
34433+#include <linux/sched.h>
34434 #include <linux/mtd/mtd.h>
34435 #include <linux/mtd/nand.h>
34436 #include <linux/mtd/nftl.h>
34437@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
34438 struct mtd_info *mtd = nftl->mbd.mtd;
34439 unsigned int i;
34440
34441+ pax_track_stack();
34442+
34443 /* Assume logical EraseSize == physical erasesize for starting the scan.
34444 We'll sort it out later if we find a MediaHeader which says otherwise */
34445 /* Actually, we won't. The new DiskOnChip driver has already scanned
34446diff -urNp linux-2.6.32.48/drivers/mtd/ubi/build.c linux-2.6.32.48/drivers/mtd/ubi/build.c
34447--- linux-2.6.32.48/drivers/mtd/ubi/build.c 2011-11-08 19:02:43.000000000 -0500
34448+++ linux-2.6.32.48/drivers/mtd/ubi/build.c 2011-11-15 19:59:43.000000000 -0500
34449@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
34450 static int __init bytes_str_to_int(const char *str)
34451 {
34452 char *endp;
34453- unsigned long result;
34454+ unsigned long result, scale = 1;
34455
34456 result = simple_strtoul(str, &endp, 0);
34457 if (str == endp || result >= INT_MAX) {
34458@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
34459
34460 switch (*endp) {
34461 case 'G':
34462- result *= 1024;
34463+ scale *= 1024;
34464 case 'M':
34465- result *= 1024;
34466+ scale *= 1024;
34467 case 'K':
34468- result *= 1024;
34469+ scale *= 1024;
34470 if (endp[1] == 'i' && endp[2] == 'B')
34471 endp += 2;
34472 case '\0':
34473@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
34474 return -EINVAL;
34475 }
34476
34477- return result;
34478+ if ((intoverflow_t)result*scale >= INT_MAX) {
34479+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
34480+ str);
34481+ return -EINVAL;
34482+ }
34483+
34484+ return result*scale;
34485 }
34486
34487 /**
34488diff -urNp linux-2.6.32.48/drivers/net/bnx2.c linux-2.6.32.48/drivers/net/bnx2.c
34489--- linux-2.6.32.48/drivers/net/bnx2.c 2011-11-08 19:02:43.000000000 -0500
34490+++ linux-2.6.32.48/drivers/net/bnx2.c 2011-11-15 19:59:43.000000000 -0500
34491@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
34492 int rc = 0;
34493 u32 magic, csum;
34494
34495+ pax_track_stack();
34496+
34497 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
34498 goto test_nvram_done;
34499
34500diff -urNp linux-2.6.32.48/drivers/net/cxgb3/l2t.h linux-2.6.32.48/drivers/net/cxgb3/l2t.h
34501--- linux-2.6.32.48/drivers/net/cxgb3/l2t.h 2011-11-08 19:02:43.000000000 -0500
34502+++ linux-2.6.32.48/drivers/net/cxgb3/l2t.h 2011-11-15 19:59:43.000000000 -0500
34503@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
34504 */
34505 struct l2t_skb_cb {
34506 arp_failure_handler_func arp_failure_handler;
34507-};
34508+} __no_const;
34509
34510 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34511
34512diff -urNp linux-2.6.32.48/drivers/net/cxgb3/t3_hw.c linux-2.6.32.48/drivers/net/cxgb3/t3_hw.c
34513--- linux-2.6.32.48/drivers/net/cxgb3/t3_hw.c 2011-11-08 19:02:43.000000000 -0500
34514+++ linux-2.6.32.48/drivers/net/cxgb3/t3_hw.c 2011-11-15 19:59:43.000000000 -0500
34515@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
34516 int i, addr, ret;
34517 struct t3_vpd vpd;
34518
34519+ pax_track_stack();
34520+
34521 /*
34522 * Card information is normally at VPD_BASE but some early cards had
34523 * it at 0.
34524diff -urNp linux-2.6.32.48/drivers/net/e1000e/82571.c linux-2.6.32.48/drivers/net/e1000e/82571.c
34525--- linux-2.6.32.48/drivers/net/e1000e/82571.c 2011-11-08 19:02:43.000000000 -0500
34526+++ linux-2.6.32.48/drivers/net/e1000e/82571.c 2011-11-15 19:59:43.000000000 -0500
34527@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
34528 {
34529 struct e1000_hw *hw = &adapter->hw;
34530 struct e1000_mac_info *mac = &hw->mac;
34531- struct e1000_mac_operations *func = &mac->ops;
34532+ e1000_mac_operations_no_const *func = &mac->ops;
34533 u32 swsm = 0;
34534 u32 swsm2 = 0;
34535 bool force_clear_smbi = false;
34536@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
34537 temp = er32(ICRXDMTC);
34538 }
34539
34540-static struct e1000_mac_operations e82571_mac_ops = {
34541+static const struct e1000_mac_operations e82571_mac_ops = {
34542 /* .check_mng_mode: mac type dependent */
34543 /* .check_for_link: media type dependent */
34544 .id_led_init = e1000e_id_led_init,
34545@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
34546 .setup_led = e1000e_setup_led_generic,
34547 };
34548
34549-static struct e1000_phy_operations e82_phy_ops_igp = {
34550+static const struct e1000_phy_operations e82_phy_ops_igp = {
34551 .acquire_phy = e1000_get_hw_semaphore_82571,
34552 .check_reset_block = e1000e_check_reset_block_generic,
34553 .commit_phy = NULL,
34554@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
34555 .cfg_on_link_up = NULL,
34556 };
34557
34558-static struct e1000_phy_operations e82_phy_ops_m88 = {
34559+static const struct e1000_phy_operations e82_phy_ops_m88 = {
34560 .acquire_phy = e1000_get_hw_semaphore_82571,
34561 .check_reset_block = e1000e_check_reset_block_generic,
34562 .commit_phy = e1000e_phy_sw_reset,
34563@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
34564 .cfg_on_link_up = NULL,
34565 };
34566
34567-static struct e1000_phy_operations e82_phy_ops_bm = {
34568+static const struct e1000_phy_operations e82_phy_ops_bm = {
34569 .acquire_phy = e1000_get_hw_semaphore_82571,
34570 .check_reset_block = e1000e_check_reset_block_generic,
34571 .commit_phy = e1000e_phy_sw_reset,
34572@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
34573 .cfg_on_link_up = NULL,
34574 };
34575
34576-static struct e1000_nvm_operations e82571_nvm_ops = {
34577+static const struct e1000_nvm_operations e82571_nvm_ops = {
34578 .acquire_nvm = e1000_acquire_nvm_82571,
34579 .read_nvm = e1000e_read_nvm_eerd,
34580 .release_nvm = e1000_release_nvm_82571,
34581diff -urNp linux-2.6.32.48/drivers/net/e1000e/e1000.h linux-2.6.32.48/drivers/net/e1000e/e1000.h
34582--- linux-2.6.32.48/drivers/net/e1000e/e1000.h 2011-11-08 19:02:43.000000000 -0500
34583+++ linux-2.6.32.48/drivers/net/e1000e/e1000.h 2011-11-15 19:59:43.000000000 -0500
34584@@ -375,9 +375,9 @@ struct e1000_info {
34585 u32 pba;
34586 u32 max_hw_frame_size;
34587 s32 (*get_variants)(struct e1000_adapter *);
34588- struct e1000_mac_operations *mac_ops;
34589- struct e1000_phy_operations *phy_ops;
34590- struct e1000_nvm_operations *nvm_ops;
34591+ const struct e1000_mac_operations *mac_ops;
34592+ const struct e1000_phy_operations *phy_ops;
34593+ const struct e1000_nvm_operations *nvm_ops;
34594 };
34595
34596 /* hardware capability, feature, and workaround flags */
34597diff -urNp linux-2.6.32.48/drivers/net/e1000e/es2lan.c linux-2.6.32.48/drivers/net/e1000e/es2lan.c
34598--- linux-2.6.32.48/drivers/net/e1000e/es2lan.c 2011-11-08 19:02:43.000000000 -0500
34599+++ linux-2.6.32.48/drivers/net/e1000e/es2lan.c 2011-11-15 19:59:43.000000000 -0500
34600@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
34601 {
34602 struct e1000_hw *hw = &adapter->hw;
34603 struct e1000_mac_info *mac = &hw->mac;
34604- struct e1000_mac_operations *func = &mac->ops;
34605+ e1000_mac_operations_no_const *func = &mac->ops;
34606
34607 /* Set media type */
34608 switch (adapter->pdev->device) {
34609@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
34610 temp = er32(ICRXDMTC);
34611 }
34612
34613-static struct e1000_mac_operations es2_mac_ops = {
34614+static const struct e1000_mac_operations es2_mac_ops = {
34615 .id_led_init = e1000e_id_led_init,
34616 .check_mng_mode = e1000e_check_mng_mode_generic,
34617 /* check_for_link dependent on media type */
34618@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
34619 .setup_led = e1000e_setup_led_generic,
34620 };
34621
34622-static struct e1000_phy_operations es2_phy_ops = {
34623+static const struct e1000_phy_operations es2_phy_ops = {
34624 .acquire_phy = e1000_acquire_phy_80003es2lan,
34625 .check_reset_block = e1000e_check_reset_block_generic,
34626 .commit_phy = e1000e_phy_sw_reset,
34627@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
34628 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
34629 };
34630
34631-static struct e1000_nvm_operations es2_nvm_ops = {
34632+static const struct e1000_nvm_operations es2_nvm_ops = {
34633 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
34634 .read_nvm = e1000e_read_nvm_eerd,
34635 .release_nvm = e1000_release_nvm_80003es2lan,
34636diff -urNp linux-2.6.32.48/drivers/net/e1000e/hw.h linux-2.6.32.48/drivers/net/e1000e/hw.h
34637--- linux-2.6.32.48/drivers/net/e1000e/hw.h 2011-11-08 19:02:43.000000000 -0500
34638+++ linux-2.6.32.48/drivers/net/e1000e/hw.h 2011-11-15 19:59:43.000000000 -0500
34639@@ -753,6 +753,7 @@ struct e1000_mac_operations {
34640 s32 (*setup_physical_interface)(struct e1000_hw *);
34641 s32 (*setup_led)(struct e1000_hw *);
34642 };
34643+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34644
34645 /* Function pointers for the PHY. */
34646 struct e1000_phy_operations {
34647@@ -774,6 +775,7 @@ struct e1000_phy_operations {
34648 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
34649 s32 (*cfg_on_link_up)(struct e1000_hw *);
34650 };
34651+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34652
34653 /* Function pointers for the NVM. */
34654 struct e1000_nvm_operations {
34655@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
34656 s32 (*validate_nvm)(struct e1000_hw *);
34657 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
34658 };
34659+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34660
34661 struct e1000_mac_info {
34662- struct e1000_mac_operations ops;
34663+ e1000_mac_operations_no_const ops;
34664
34665 u8 addr[6];
34666 u8 perm_addr[6];
34667@@ -823,7 +826,7 @@ struct e1000_mac_info {
34668 };
34669
34670 struct e1000_phy_info {
34671- struct e1000_phy_operations ops;
34672+ e1000_phy_operations_no_const ops;
34673
34674 enum e1000_phy_type type;
34675
34676@@ -857,7 +860,7 @@ struct e1000_phy_info {
34677 };
34678
34679 struct e1000_nvm_info {
34680- struct e1000_nvm_operations ops;
34681+ e1000_nvm_operations_no_const ops;
34682
34683 enum e1000_nvm_type type;
34684 enum e1000_nvm_override override;
34685diff -urNp linux-2.6.32.48/drivers/net/e1000e/ich8lan.c linux-2.6.32.48/drivers/net/e1000e/ich8lan.c
34686--- linux-2.6.32.48/drivers/net/e1000e/ich8lan.c 2011-11-08 19:02:43.000000000 -0500
34687+++ linux-2.6.32.48/drivers/net/e1000e/ich8lan.c 2011-11-15 19:59:43.000000000 -0500
34688@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
34689 }
34690 }
34691
34692-static struct e1000_mac_operations ich8_mac_ops = {
34693+static const struct e1000_mac_operations ich8_mac_ops = {
34694 .id_led_init = e1000e_id_led_init,
34695 .check_mng_mode = e1000_check_mng_mode_ich8lan,
34696 .check_for_link = e1000_check_for_copper_link_ich8lan,
34697@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
34698 /* id_led_init dependent on mac type */
34699 };
34700
34701-static struct e1000_phy_operations ich8_phy_ops = {
34702+static const struct e1000_phy_operations ich8_phy_ops = {
34703 .acquire_phy = e1000_acquire_swflag_ich8lan,
34704 .check_reset_block = e1000_check_reset_block_ich8lan,
34705 .commit_phy = NULL,
34706@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
34707 .write_phy_reg = e1000e_write_phy_reg_igp,
34708 };
34709
34710-static struct e1000_nvm_operations ich8_nvm_ops = {
34711+static const struct e1000_nvm_operations ich8_nvm_ops = {
34712 .acquire_nvm = e1000_acquire_nvm_ich8lan,
34713 .read_nvm = e1000_read_nvm_ich8lan,
34714 .release_nvm = e1000_release_nvm_ich8lan,
34715diff -urNp linux-2.6.32.48/drivers/net/hamradio/6pack.c linux-2.6.32.48/drivers/net/hamradio/6pack.c
34716--- linux-2.6.32.48/drivers/net/hamradio/6pack.c 2011-11-08 19:02:43.000000000 -0500
34717+++ linux-2.6.32.48/drivers/net/hamradio/6pack.c 2011-11-15 19:59:43.000000000 -0500
34718@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
34719 unsigned char buf[512];
34720 int count1;
34721
34722+ pax_track_stack();
34723+
34724 if (!count)
34725 return;
34726
34727diff -urNp linux-2.6.32.48/drivers/net/ibmveth.c linux-2.6.32.48/drivers/net/ibmveth.c
34728--- linux-2.6.32.48/drivers/net/ibmveth.c 2011-11-08 19:02:43.000000000 -0500
34729+++ linux-2.6.32.48/drivers/net/ibmveth.c 2011-11-15 19:59:43.000000000 -0500
34730@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
34731 NULL,
34732 };
34733
34734-static struct sysfs_ops veth_pool_ops = {
34735+static const struct sysfs_ops veth_pool_ops = {
34736 .show = veth_pool_show,
34737 .store = veth_pool_store,
34738 };
34739diff -urNp linux-2.6.32.48/drivers/net/igb/e1000_82575.c linux-2.6.32.48/drivers/net/igb/e1000_82575.c
34740--- linux-2.6.32.48/drivers/net/igb/e1000_82575.c 2011-11-08 19:02:43.000000000 -0500
34741+++ linux-2.6.32.48/drivers/net/igb/e1000_82575.c 2011-11-15 19:59:43.000000000 -0500
34742@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct
34743 wr32(E1000_VT_CTL, vt_ctl);
34744 }
34745
34746-static struct e1000_mac_operations e1000_mac_ops_82575 = {
34747+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
34748 .reset_hw = igb_reset_hw_82575,
34749 .init_hw = igb_init_hw_82575,
34750 .check_for_link = igb_check_for_link_82575,
34751@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000
34752 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
34753 };
34754
34755-static struct e1000_phy_operations e1000_phy_ops_82575 = {
34756+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
34757 .acquire = igb_acquire_phy_82575,
34758 .get_cfg_done = igb_get_cfg_done_82575,
34759 .release = igb_release_phy_82575,
34760 };
34761
34762-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34763+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34764 .acquire = igb_acquire_nvm_82575,
34765 .read = igb_read_nvm_eerd,
34766 .release = igb_release_nvm_82575,
34767diff -urNp linux-2.6.32.48/drivers/net/igb/e1000_hw.h linux-2.6.32.48/drivers/net/igb/e1000_hw.h
34768--- linux-2.6.32.48/drivers/net/igb/e1000_hw.h 2011-11-08 19:02:43.000000000 -0500
34769+++ linux-2.6.32.48/drivers/net/igb/e1000_hw.h 2011-11-15 19:59:43.000000000 -0500
34770@@ -288,6 +288,7 @@ struct e1000_mac_operations {
34771 s32 (*read_mac_addr)(struct e1000_hw *);
34772 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34773 };
34774+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34775
34776 struct e1000_phy_operations {
34777 s32 (*acquire)(struct e1000_hw *);
34778@@ -303,6 +304,7 @@ struct e1000_phy_operations {
34779 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34780 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34781 };
34782+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34783
34784 struct e1000_nvm_operations {
34785 s32 (*acquire)(struct e1000_hw *);
34786@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
34787 void (*release)(struct e1000_hw *);
34788 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34789 };
34790+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34791
34792 struct e1000_info {
34793 s32 (*get_invariants)(struct e1000_hw *);
34794@@ -321,7 +324,7 @@ struct e1000_info {
34795 extern const struct e1000_info e1000_82575_info;
34796
34797 struct e1000_mac_info {
34798- struct e1000_mac_operations ops;
34799+ e1000_mac_operations_no_const ops;
34800
34801 u8 addr[6];
34802 u8 perm_addr[6];
34803@@ -365,7 +368,7 @@ struct e1000_mac_info {
34804 };
34805
34806 struct e1000_phy_info {
34807- struct e1000_phy_operations ops;
34808+ e1000_phy_operations_no_const ops;
34809
34810 enum e1000_phy_type type;
34811
34812@@ -400,7 +403,7 @@ struct e1000_phy_info {
34813 };
34814
34815 struct e1000_nvm_info {
34816- struct e1000_nvm_operations ops;
34817+ e1000_nvm_operations_no_const ops;
34818
34819 enum e1000_nvm_type type;
34820 enum e1000_nvm_override override;
34821@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
34822 s32 (*check_for_ack)(struct e1000_hw *, u16);
34823 s32 (*check_for_rst)(struct e1000_hw *, u16);
34824 };
34825+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34826
34827 struct e1000_mbx_stats {
34828 u32 msgs_tx;
34829@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
34830 };
34831
34832 struct e1000_mbx_info {
34833- struct e1000_mbx_operations ops;
34834+ e1000_mbx_operations_no_const ops;
34835 struct e1000_mbx_stats stats;
34836 u32 timeout;
34837 u32 usec_delay;
34838diff -urNp linux-2.6.32.48/drivers/net/igbvf/vf.h linux-2.6.32.48/drivers/net/igbvf/vf.h
34839--- linux-2.6.32.48/drivers/net/igbvf/vf.h 2011-11-08 19:02:43.000000000 -0500
34840+++ linux-2.6.32.48/drivers/net/igbvf/vf.h 2011-11-15 19:59:43.000000000 -0500
34841@@ -187,9 +187,10 @@ struct e1000_mac_operations {
34842 s32 (*read_mac_addr)(struct e1000_hw *);
34843 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34844 };
34845+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34846
34847 struct e1000_mac_info {
34848- struct e1000_mac_operations ops;
34849+ e1000_mac_operations_no_const ops;
34850 u8 addr[6];
34851 u8 perm_addr[6];
34852
34853@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
34854 s32 (*check_for_ack)(struct e1000_hw *);
34855 s32 (*check_for_rst)(struct e1000_hw *);
34856 };
34857+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34858
34859 struct e1000_mbx_stats {
34860 u32 msgs_tx;
34861@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
34862 };
34863
34864 struct e1000_mbx_info {
34865- struct e1000_mbx_operations ops;
34866+ e1000_mbx_operations_no_const ops;
34867 struct e1000_mbx_stats stats;
34868 u32 timeout;
34869 u32 usec_delay;
34870diff -urNp linux-2.6.32.48/drivers/net/iseries_veth.c linux-2.6.32.48/drivers/net/iseries_veth.c
34871--- linux-2.6.32.48/drivers/net/iseries_veth.c 2011-11-08 19:02:43.000000000 -0500
34872+++ linux-2.6.32.48/drivers/net/iseries_veth.c 2011-11-15 19:59:43.000000000 -0500
34873@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34874 NULL
34875 };
34876
34877-static struct sysfs_ops veth_cnx_sysfs_ops = {
34878+static const struct sysfs_ops veth_cnx_sysfs_ops = {
34879 .show = veth_cnx_attribute_show
34880 };
34881
34882@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34883 NULL
34884 };
34885
34886-static struct sysfs_ops veth_port_sysfs_ops = {
34887+static const struct sysfs_ops veth_port_sysfs_ops = {
34888 .show = veth_port_attribute_show
34889 };
34890
34891diff -urNp linux-2.6.32.48/drivers/net/ixgb/ixgb_main.c linux-2.6.32.48/drivers/net/ixgb/ixgb_main.c
34892--- linux-2.6.32.48/drivers/net/ixgb/ixgb_main.c 2011-11-08 19:02:43.000000000 -0500
34893+++ linux-2.6.32.48/drivers/net/ixgb/ixgb_main.c 2011-11-15 19:59:43.000000000 -0500
34894@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34895 u32 rctl;
34896 int i;
34897
34898+ pax_track_stack();
34899+
34900 /* Check for Promiscuous and All Multicast modes */
34901
34902 rctl = IXGB_READ_REG(hw, RCTL);
34903diff -urNp linux-2.6.32.48/drivers/net/ixgb/ixgb_param.c linux-2.6.32.48/drivers/net/ixgb/ixgb_param.c
34904--- linux-2.6.32.48/drivers/net/ixgb/ixgb_param.c 2011-11-08 19:02:43.000000000 -0500
34905+++ linux-2.6.32.48/drivers/net/ixgb/ixgb_param.c 2011-11-15 19:59:43.000000000 -0500
34906@@ -260,6 +260,9 @@ void __devinit
34907 ixgb_check_options(struct ixgb_adapter *adapter)
34908 {
34909 int bd = adapter->bd_number;
34910+
34911+ pax_track_stack();
34912+
34913 if (bd >= IXGB_MAX_NIC) {
34914 printk(KERN_NOTICE
34915 "Warning: no configuration for board #%i\n", bd);
34916diff -urNp linux-2.6.32.48/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.48/drivers/net/ixgbe/ixgbe_type.h
34917--- linux-2.6.32.48/drivers/net/ixgbe/ixgbe_type.h 2011-11-08 19:02:43.000000000 -0500
34918+++ linux-2.6.32.48/drivers/net/ixgbe/ixgbe_type.h 2011-11-15 19:59:43.000000000 -0500
34919@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
34920 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
34921 s32 (*update_checksum)(struct ixgbe_hw *);
34922 };
34923+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34924
34925 struct ixgbe_mac_operations {
34926 s32 (*init_hw)(struct ixgbe_hw *);
34927@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
34928 /* Flow Control */
34929 s32 (*fc_enable)(struct ixgbe_hw *, s32);
34930 };
34931+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34932
34933 struct ixgbe_phy_operations {
34934 s32 (*identify)(struct ixgbe_hw *);
34935@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
34936 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
34937 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34938 };
34939+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34940
34941 struct ixgbe_eeprom_info {
34942- struct ixgbe_eeprom_operations ops;
34943+ ixgbe_eeprom_operations_no_const ops;
34944 enum ixgbe_eeprom_type type;
34945 u32 semaphore_delay;
34946 u16 word_size;
34947@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
34948 };
34949
34950 struct ixgbe_mac_info {
34951- struct ixgbe_mac_operations ops;
34952+ ixgbe_mac_operations_no_const ops;
34953 enum ixgbe_mac_type type;
34954 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34955 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34956@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
34957 };
34958
34959 struct ixgbe_phy_info {
34960- struct ixgbe_phy_operations ops;
34961+ ixgbe_phy_operations_no_const ops;
34962 struct mdio_if_info mdio;
34963 enum ixgbe_phy_type type;
34964 u32 id;
34965diff -urNp linux-2.6.32.48/drivers/net/mlx4/main.c linux-2.6.32.48/drivers/net/mlx4/main.c
34966--- linux-2.6.32.48/drivers/net/mlx4/main.c 2011-11-08 19:02:43.000000000 -0500
34967+++ linux-2.6.32.48/drivers/net/mlx4/main.c 2011-11-15 19:59:43.000000000 -0500
34968@@ -38,6 +38,7 @@
34969 #include <linux/errno.h>
34970 #include <linux/pci.h>
34971 #include <linux/dma-mapping.h>
34972+#include <linux/sched.h>
34973
34974 #include <linux/mlx4/device.h>
34975 #include <linux/mlx4/doorbell.h>
34976@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34977 u64 icm_size;
34978 int err;
34979
34980+ pax_track_stack();
34981+
34982 err = mlx4_QUERY_FW(dev);
34983 if (err) {
34984 if (err == -EACCES)
34985diff -urNp linux-2.6.32.48/drivers/net/niu.c linux-2.6.32.48/drivers/net/niu.c
34986--- linux-2.6.32.48/drivers/net/niu.c 2011-11-08 19:02:43.000000000 -0500
34987+++ linux-2.6.32.48/drivers/net/niu.c 2011-11-15 19:59:43.000000000 -0500
34988@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34989 int i, num_irqs, err;
34990 u8 first_ldg;
34991
34992+ pax_track_stack();
34993+
34994 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34995 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34996 ldg_num_map[i] = first_ldg + i;
34997diff -urNp linux-2.6.32.48/drivers/net/pcnet32.c linux-2.6.32.48/drivers/net/pcnet32.c
34998--- linux-2.6.32.48/drivers/net/pcnet32.c 2011-11-08 19:02:43.000000000 -0500
34999+++ linux-2.6.32.48/drivers/net/pcnet32.c 2011-11-15 19:59:43.000000000 -0500
35000@@ -79,7 +79,7 @@ static int cards_found;
35001 /*
35002 * VLB I/O addresses
35003 */
35004-static unsigned int pcnet32_portlist[] __initdata =
35005+static unsigned int pcnet32_portlist[] __devinitdata =
35006 { 0x300, 0x320, 0x340, 0x360, 0 };
35007
35008 static int pcnet32_debug = 0;
35009@@ -267,7 +267,7 @@ struct pcnet32_private {
35010 struct sk_buff **rx_skbuff;
35011 dma_addr_t *tx_dma_addr;
35012 dma_addr_t *rx_dma_addr;
35013- struct pcnet32_access a;
35014+ struct pcnet32_access *a;
35015 spinlock_t lock; /* Guard lock */
35016 unsigned int cur_rx, cur_tx; /* The next free ring entry */
35017 unsigned int rx_ring_size; /* current rx ring size */
35018@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
35019 u16 val;
35020
35021 netif_wake_queue(dev);
35022- val = lp->a.read_csr(ioaddr, CSR3);
35023+ val = lp->a->read_csr(ioaddr, CSR3);
35024 val &= 0x00ff;
35025- lp->a.write_csr(ioaddr, CSR3, val);
35026+ lp->a->write_csr(ioaddr, CSR3, val);
35027 napi_enable(&lp->napi);
35028 }
35029
35030@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
35031 r = mii_link_ok(&lp->mii_if);
35032 } else if (lp->chip_version >= PCNET32_79C970A) {
35033 ulong ioaddr = dev->base_addr; /* card base I/O address */
35034- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
35035+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
35036 } else { /* can not detect link on really old chips */
35037 r = 1;
35038 }
35039@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
35040 pcnet32_netif_stop(dev);
35041
35042 spin_lock_irqsave(&lp->lock, flags);
35043- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
35044+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
35045
35046 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
35047
35048@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
35049 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
35050 {
35051 struct pcnet32_private *lp = netdev_priv(dev);
35052- struct pcnet32_access *a = &lp->a; /* access to registers */
35053+ struct pcnet32_access *a = lp->a; /* access to registers */
35054 ulong ioaddr = dev->base_addr; /* card base I/O address */
35055 struct sk_buff *skb; /* sk buff */
35056 int x, i; /* counters */
35057@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
35058 pcnet32_netif_stop(dev);
35059
35060 spin_lock_irqsave(&lp->lock, flags);
35061- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
35062+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
35063
35064 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
35065
35066 /* Reset the PCNET32 */
35067- lp->a.reset(ioaddr);
35068- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35069+ lp->a->reset(ioaddr);
35070+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35071
35072 /* switch pcnet32 to 32bit mode */
35073- lp->a.write_bcr(ioaddr, 20, 2);
35074+ lp->a->write_bcr(ioaddr, 20, 2);
35075
35076 /* purge & init rings but don't actually restart */
35077 pcnet32_restart(dev, 0x0000);
35078
35079- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
35080+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
35081
35082 /* Initialize Transmit buffers. */
35083 size = data_len + 15;
35084@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
35085
35086 /* set int loopback in CSR15 */
35087 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
35088- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
35089+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
35090
35091 teststatus = cpu_to_le16(0x8000);
35092- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
35093+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
35094
35095 /* Check status of descriptors */
35096 for (x = 0; x < numbuffs; x++) {
35097@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
35098 }
35099 }
35100
35101- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
35102+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
35103 wmb();
35104 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
35105 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
35106@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
35107 pcnet32_restart(dev, CSR0_NORMAL);
35108 } else {
35109 pcnet32_purge_rx_ring(dev);
35110- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
35111+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
35112 }
35113 spin_unlock_irqrestore(&lp->lock, flags);
35114
35115@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
35116 static void pcnet32_led_blink_callback(struct net_device *dev)
35117 {
35118 struct pcnet32_private *lp = netdev_priv(dev);
35119- struct pcnet32_access *a = &lp->a;
35120+ struct pcnet32_access *a = lp->a;
35121 ulong ioaddr = dev->base_addr;
35122 unsigned long flags;
35123 int i;
35124@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
35125 static int pcnet32_phys_id(struct net_device *dev, u32 data)
35126 {
35127 struct pcnet32_private *lp = netdev_priv(dev);
35128- struct pcnet32_access *a = &lp->a;
35129+ struct pcnet32_access *a = lp->a;
35130 ulong ioaddr = dev->base_addr;
35131 unsigned long flags;
35132 int i, regs[4];
35133@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
35134 {
35135 int csr5;
35136 struct pcnet32_private *lp = netdev_priv(dev);
35137- struct pcnet32_access *a = &lp->a;
35138+ struct pcnet32_access *a = lp->a;
35139 ulong ioaddr = dev->base_addr;
35140 int ticks;
35141
35142@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
35143 spin_lock_irqsave(&lp->lock, flags);
35144 if (pcnet32_tx(dev)) {
35145 /* reset the chip to clear the error condition, then restart */
35146- lp->a.reset(ioaddr);
35147- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35148+ lp->a->reset(ioaddr);
35149+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35150 pcnet32_restart(dev, CSR0_START);
35151 netif_wake_queue(dev);
35152 }
35153@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
35154 __napi_complete(napi);
35155
35156 /* clear interrupt masks */
35157- val = lp->a.read_csr(ioaddr, CSR3);
35158+ val = lp->a->read_csr(ioaddr, CSR3);
35159 val &= 0x00ff;
35160- lp->a.write_csr(ioaddr, CSR3, val);
35161+ lp->a->write_csr(ioaddr, CSR3, val);
35162
35163 /* Set interrupt enable. */
35164- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
35165+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
35166
35167 spin_unlock_irqrestore(&lp->lock, flags);
35168 }
35169@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
35170 int i, csr0;
35171 u16 *buff = ptr;
35172 struct pcnet32_private *lp = netdev_priv(dev);
35173- struct pcnet32_access *a = &lp->a;
35174+ struct pcnet32_access *a = lp->a;
35175 ulong ioaddr = dev->base_addr;
35176 unsigned long flags;
35177
35178@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
35179 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
35180 if (lp->phymask & (1 << j)) {
35181 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
35182- lp->a.write_bcr(ioaddr, 33,
35183+ lp->a->write_bcr(ioaddr, 33,
35184 (j << 5) | i);
35185- *buff++ = lp->a.read_bcr(ioaddr, 34);
35186+ *buff++ = lp->a->read_bcr(ioaddr, 34);
35187 }
35188 }
35189 }
35190@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
35191 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
35192 lp->options |= PCNET32_PORT_FD;
35193
35194- lp->a = *a;
35195+ lp->a = a;
35196
35197 /* prior to register_netdev, dev->name is not yet correct */
35198 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
35199@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
35200 if (lp->mii) {
35201 /* lp->phycount and lp->phymask are set to 0 by memset above */
35202
35203- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
35204+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
35205 /* scan for PHYs */
35206 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
35207 unsigned short id1, id2;
35208@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
35209 "Found PHY %04x:%04x at address %d.\n",
35210 id1, id2, i);
35211 }
35212- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
35213+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
35214 if (lp->phycount > 1) {
35215 lp->options |= PCNET32_PORT_MII;
35216 }
35217@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
35218 }
35219
35220 /* Reset the PCNET32 */
35221- lp->a.reset(ioaddr);
35222+ lp->a->reset(ioaddr);
35223
35224 /* switch pcnet32 to 32bit mode */
35225- lp->a.write_bcr(ioaddr, 20, 2);
35226+ lp->a->write_bcr(ioaddr, 20, 2);
35227
35228 if (netif_msg_ifup(lp))
35229 printk(KERN_DEBUG
35230@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
35231 (u32) (lp->init_dma_addr));
35232
35233 /* set/reset autoselect bit */
35234- val = lp->a.read_bcr(ioaddr, 2) & ~2;
35235+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
35236 if (lp->options & PCNET32_PORT_ASEL)
35237 val |= 2;
35238- lp->a.write_bcr(ioaddr, 2, val);
35239+ lp->a->write_bcr(ioaddr, 2, val);
35240
35241 /* handle full duplex setting */
35242 if (lp->mii_if.full_duplex) {
35243- val = lp->a.read_bcr(ioaddr, 9) & ~3;
35244+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
35245 if (lp->options & PCNET32_PORT_FD) {
35246 val |= 1;
35247 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
35248@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
35249 if (lp->chip_version == 0x2627)
35250 val |= 3;
35251 }
35252- lp->a.write_bcr(ioaddr, 9, val);
35253+ lp->a->write_bcr(ioaddr, 9, val);
35254 }
35255
35256 /* set/reset GPSI bit in test register */
35257- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
35258+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
35259 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
35260 val |= 0x10;
35261- lp->a.write_csr(ioaddr, 124, val);
35262+ lp->a->write_csr(ioaddr, 124, val);
35263
35264 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
35265 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
35266@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
35267 * duplex, and/or enable auto negotiation, and clear DANAS
35268 */
35269 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
35270- lp->a.write_bcr(ioaddr, 32,
35271- lp->a.read_bcr(ioaddr, 32) | 0x0080);
35272+ lp->a->write_bcr(ioaddr, 32,
35273+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
35274 /* disable Auto Negotiation, set 10Mpbs, HD */
35275- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
35276+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
35277 if (lp->options & PCNET32_PORT_FD)
35278 val |= 0x10;
35279 if (lp->options & PCNET32_PORT_100)
35280 val |= 0x08;
35281- lp->a.write_bcr(ioaddr, 32, val);
35282+ lp->a->write_bcr(ioaddr, 32, val);
35283 } else {
35284 if (lp->options & PCNET32_PORT_ASEL) {
35285- lp->a.write_bcr(ioaddr, 32,
35286- lp->a.read_bcr(ioaddr,
35287+ lp->a->write_bcr(ioaddr, 32,
35288+ lp->a->read_bcr(ioaddr,
35289 32) | 0x0080);
35290 /* enable auto negotiate, setup, disable fd */
35291- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
35292+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
35293 val |= 0x20;
35294- lp->a.write_bcr(ioaddr, 32, val);
35295+ lp->a->write_bcr(ioaddr, 32, val);
35296 }
35297 }
35298 } else {
35299@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
35300 * There is really no good other way to handle multiple PHYs
35301 * other than turning off all automatics
35302 */
35303- val = lp->a.read_bcr(ioaddr, 2);
35304- lp->a.write_bcr(ioaddr, 2, val & ~2);
35305- val = lp->a.read_bcr(ioaddr, 32);
35306- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
35307+ val = lp->a->read_bcr(ioaddr, 2);
35308+ lp->a->write_bcr(ioaddr, 2, val & ~2);
35309+ val = lp->a->read_bcr(ioaddr, 32);
35310+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
35311
35312 if (!(lp->options & PCNET32_PORT_ASEL)) {
35313 /* setup ecmd */
35314@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
35315 ecmd.speed =
35316 lp->
35317 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
35318- bcr9 = lp->a.read_bcr(ioaddr, 9);
35319+ bcr9 = lp->a->read_bcr(ioaddr, 9);
35320
35321 if (lp->options & PCNET32_PORT_FD) {
35322 ecmd.duplex = DUPLEX_FULL;
35323@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
35324 ecmd.duplex = DUPLEX_HALF;
35325 bcr9 |= ~(1 << 0);
35326 }
35327- lp->a.write_bcr(ioaddr, 9, bcr9);
35328+ lp->a->write_bcr(ioaddr, 9, bcr9);
35329 }
35330
35331 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
35332@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
35333
35334 #ifdef DO_DXSUFLO
35335 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
35336- val = lp->a.read_csr(ioaddr, CSR3);
35337+ val = lp->a->read_csr(ioaddr, CSR3);
35338 val |= 0x40;
35339- lp->a.write_csr(ioaddr, CSR3, val);
35340+ lp->a->write_csr(ioaddr, CSR3, val);
35341 }
35342 #endif
35343
35344@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
35345 napi_enable(&lp->napi);
35346
35347 /* Re-initialize the PCNET32, and start it when done. */
35348- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35349- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35350+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35351+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35352
35353- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35354- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35355+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35356+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35357
35358 netif_start_queue(dev);
35359
35360@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
35361
35362 i = 0;
35363 while (i++ < 100)
35364- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35365+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35366 break;
35367 /*
35368 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
35369 * reports that doing so triggers a bug in the '974.
35370 */
35371- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
35372+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
35373
35374 if (netif_msg_ifup(lp))
35375 printk(KERN_DEBUG
35376 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
35377 dev->name, i,
35378 (u32) (lp->init_dma_addr),
35379- lp->a.read_csr(ioaddr, CSR0));
35380+ lp->a->read_csr(ioaddr, CSR0));
35381
35382 spin_unlock_irqrestore(&lp->lock, flags);
35383
35384@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
35385 * Switch back to 16bit mode to avoid problems with dumb
35386 * DOS packet driver after a warm reboot
35387 */
35388- lp->a.write_bcr(ioaddr, 20, 4);
35389+ lp->a->write_bcr(ioaddr, 20, 4);
35390
35391 err_free_irq:
35392 spin_unlock_irqrestore(&lp->lock, flags);
35393@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
35394
35395 /* wait for stop */
35396 for (i = 0; i < 100; i++)
35397- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
35398+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
35399 break;
35400
35401 if (i >= 100 && netif_msg_drv(lp))
35402@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
35403 return;
35404
35405 /* ReInit Ring */
35406- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35407+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35408 i = 0;
35409 while (i++ < 1000)
35410- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35411+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35412 break;
35413
35414- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
35415+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
35416 }
35417
35418 static void pcnet32_tx_timeout(struct net_device *dev)
35419@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
35420 if (pcnet32_debug & NETIF_MSG_DRV)
35421 printk(KERN_ERR
35422 "%s: transmit timed out, status %4.4x, resetting.\n",
35423- dev->name, lp->a.read_csr(ioaddr, CSR0));
35424- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35425+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35426+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35427 dev->stats.tx_errors++;
35428 if (netif_msg_tx_err(lp)) {
35429 int i;
35430@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35431 if (netif_msg_tx_queued(lp)) {
35432 printk(KERN_DEBUG
35433 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
35434- dev->name, lp->a.read_csr(ioaddr, CSR0));
35435+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35436 }
35437
35438 /* Default status -- will not enable Successful-TxDone
35439@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35440 dev->stats.tx_bytes += skb->len;
35441
35442 /* Trigger an immediate send poll. */
35443- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35444+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35445
35446 dev->trans_start = jiffies;
35447
35448@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
35449
35450 spin_lock(&lp->lock);
35451
35452- csr0 = lp->a.read_csr(ioaddr, CSR0);
35453+ csr0 = lp->a->read_csr(ioaddr, CSR0);
35454 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
35455 if (csr0 == 0xffff) {
35456 break; /* PCMCIA remove happened */
35457 }
35458 /* Acknowledge all of the current interrupt sources ASAP. */
35459- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35460+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35461
35462 if (netif_msg_intr(lp))
35463 printk(KERN_DEBUG
35464 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
35465- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
35466+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
35467
35468 /* Log misc errors. */
35469 if (csr0 & 0x4000)
35470@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
35471 if (napi_schedule_prep(&lp->napi)) {
35472 u16 val;
35473 /* set interrupt masks */
35474- val = lp->a.read_csr(ioaddr, CSR3);
35475+ val = lp->a->read_csr(ioaddr, CSR3);
35476 val |= 0x5f00;
35477- lp->a.write_csr(ioaddr, CSR3, val);
35478+ lp->a->write_csr(ioaddr, CSR3, val);
35479
35480 __napi_schedule(&lp->napi);
35481 break;
35482 }
35483- csr0 = lp->a.read_csr(ioaddr, CSR0);
35484+ csr0 = lp->a->read_csr(ioaddr, CSR0);
35485 }
35486
35487 if (netif_msg_intr(lp))
35488 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
35489- dev->name, lp->a.read_csr(ioaddr, CSR0));
35490+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35491
35492 spin_unlock(&lp->lock);
35493
35494@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
35495
35496 spin_lock_irqsave(&lp->lock, flags);
35497
35498- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35499+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35500
35501 if (netif_msg_ifdown(lp))
35502 printk(KERN_DEBUG
35503 "%s: Shutting down ethercard, status was %2.2x.\n",
35504- dev->name, lp->a.read_csr(ioaddr, CSR0));
35505+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35506
35507 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
35508- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35509+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35510
35511 /*
35512 * Switch back to 16bit mode to avoid problems with dumb
35513 * DOS packet driver after a warm reboot
35514 */
35515- lp->a.write_bcr(ioaddr, 20, 4);
35516+ lp->a->write_bcr(ioaddr, 20, 4);
35517
35518 spin_unlock_irqrestore(&lp->lock, flags);
35519
35520@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
35521 unsigned long flags;
35522
35523 spin_lock_irqsave(&lp->lock, flags);
35524- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35525+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35526 spin_unlock_irqrestore(&lp->lock, flags);
35527
35528 return &dev->stats;
35529@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
35530 if (dev->flags & IFF_ALLMULTI) {
35531 ib->filter[0] = cpu_to_le32(~0U);
35532 ib->filter[1] = cpu_to_le32(~0U);
35533- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35534- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35535- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35536- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35537+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35538+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35539+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35540+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35541 return;
35542 }
35543 /* clear the multicast filter */
35544@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
35545 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
35546 }
35547 for (i = 0; i < 4; i++)
35548- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
35549+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
35550 le16_to_cpu(mcast_table[i]));
35551 return;
35552 }
35553@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
35554
35555 spin_lock_irqsave(&lp->lock, flags);
35556 suspended = pcnet32_suspend(dev, &flags, 0);
35557- csr15 = lp->a.read_csr(ioaddr, CSR15);
35558+ csr15 = lp->a->read_csr(ioaddr, CSR15);
35559 if (dev->flags & IFF_PROMISC) {
35560 /* Log any net taps. */
35561 if (netif_msg_hw(lp))
35562@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
35563 lp->init_block->mode =
35564 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
35565 7);
35566- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
35567+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
35568 } else {
35569 lp->init_block->mode =
35570 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
35571- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35572+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35573 pcnet32_load_multicast(dev);
35574 }
35575
35576 if (suspended) {
35577 int csr5;
35578 /* clear SUSPEND (SPND) - CSR5 bit 0 */
35579- csr5 = lp->a.read_csr(ioaddr, CSR5);
35580- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35581+ csr5 = lp->a->read_csr(ioaddr, CSR5);
35582+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35583 } else {
35584- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35585+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35586 pcnet32_restart(dev, CSR0_NORMAL);
35587 netif_wake_queue(dev);
35588 }
35589@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
35590 if (!lp->mii)
35591 return 0;
35592
35593- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35594- val_out = lp->a.read_bcr(ioaddr, 34);
35595+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35596+ val_out = lp->a->read_bcr(ioaddr, 34);
35597
35598 return val_out;
35599 }
35600@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
35601 if (!lp->mii)
35602 return;
35603
35604- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35605- lp->a.write_bcr(ioaddr, 34, val);
35606+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35607+ lp->a->write_bcr(ioaddr, 34, val);
35608 }
35609
35610 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35611@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
35612 curr_link = mii_link_ok(&lp->mii_if);
35613 } else {
35614 ulong ioaddr = dev->base_addr; /* card base I/O address */
35615- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
35616+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
35617 }
35618 if (!curr_link) {
35619 if (prev_link || verbose) {
35620@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
35621 (ecmd.duplex ==
35622 DUPLEX_FULL) ? "full" : "half");
35623 }
35624- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
35625+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
35626 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
35627 if (lp->mii_if.full_duplex)
35628 bcr9 |= (1 << 0);
35629 else
35630 bcr9 &= ~(1 << 0);
35631- lp->a.write_bcr(dev->base_addr, 9, bcr9);
35632+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
35633 }
35634 } else {
35635 if (netif_msg_link(lp))
35636diff -urNp linux-2.6.32.48/drivers/net/tg3.h linux-2.6.32.48/drivers/net/tg3.h
35637--- linux-2.6.32.48/drivers/net/tg3.h 2011-11-08 19:02:43.000000000 -0500
35638+++ linux-2.6.32.48/drivers/net/tg3.h 2011-11-15 19:59:43.000000000 -0500
35639@@ -95,6 +95,7 @@
35640 #define CHIPREV_ID_5750_A0 0x4000
35641 #define CHIPREV_ID_5750_A1 0x4001
35642 #define CHIPREV_ID_5750_A3 0x4003
35643+#define CHIPREV_ID_5750_C1 0x4201
35644 #define CHIPREV_ID_5750_C2 0x4202
35645 #define CHIPREV_ID_5752_A0_HW 0x5000
35646 #define CHIPREV_ID_5752_A0 0x6000
35647diff -urNp linux-2.6.32.48/drivers/net/tokenring/abyss.c linux-2.6.32.48/drivers/net/tokenring/abyss.c
35648--- linux-2.6.32.48/drivers/net/tokenring/abyss.c 2011-11-08 19:02:43.000000000 -0500
35649+++ linux-2.6.32.48/drivers/net/tokenring/abyss.c 2011-11-15 19:59:43.000000000 -0500
35650@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
35651
35652 static int __init abyss_init (void)
35653 {
35654- abyss_netdev_ops = tms380tr_netdev_ops;
35655+ pax_open_kernel();
35656+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35657
35658- abyss_netdev_ops.ndo_open = abyss_open;
35659- abyss_netdev_ops.ndo_stop = abyss_close;
35660+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35661+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35662+ pax_close_kernel();
35663
35664 return pci_register_driver(&abyss_driver);
35665 }
35666diff -urNp linux-2.6.32.48/drivers/net/tokenring/madgemc.c linux-2.6.32.48/drivers/net/tokenring/madgemc.c
35667--- linux-2.6.32.48/drivers/net/tokenring/madgemc.c 2011-11-08 19:02:43.000000000 -0500
35668+++ linux-2.6.32.48/drivers/net/tokenring/madgemc.c 2011-11-15 19:59:43.000000000 -0500
35669@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
35670
35671 static int __init madgemc_init (void)
35672 {
35673- madgemc_netdev_ops = tms380tr_netdev_ops;
35674- madgemc_netdev_ops.ndo_open = madgemc_open;
35675- madgemc_netdev_ops.ndo_stop = madgemc_close;
35676+ pax_open_kernel();
35677+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35678+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35679+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35680+ pax_close_kernel();
35681
35682 return mca_register_driver (&madgemc_driver);
35683 }
35684diff -urNp linux-2.6.32.48/drivers/net/tokenring/proteon.c linux-2.6.32.48/drivers/net/tokenring/proteon.c
35685--- linux-2.6.32.48/drivers/net/tokenring/proteon.c 2011-11-08 19:02:43.000000000 -0500
35686+++ linux-2.6.32.48/drivers/net/tokenring/proteon.c 2011-11-15 19:59:43.000000000 -0500
35687@@ -353,9 +353,11 @@ static int __init proteon_init(void)
35688 struct platform_device *pdev;
35689 int i, num = 0, err = 0;
35690
35691- proteon_netdev_ops = tms380tr_netdev_ops;
35692- proteon_netdev_ops.ndo_open = proteon_open;
35693- proteon_netdev_ops.ndo_stop = tms380tr_close;
35694+ pax_open_kernel();
35695+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35696+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35697+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35698+ pax_close_kernel();
35699
35700 err = platform_driver_register(&proteon_driver);
35701 if (err)
35702diff -urNp linux-2.6.32.48/drivers/net/tokenring/skisa.c linux-2.6.32.48/drivers/net/tokenring/skisa.c
35703--- linux-2.6.32.48/drivers/net/tokenring/skisa.c 2011-11-08 19:02:43.000000000 -0500
35704+++ linux-2.6.32.48/drivers/net/tokenring/skisa.c 2011-11-15 19:59:43.000000000 -0500
35705@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35706 struct platform_device *pdev;
35707 int i, num = 0, err = 0;
35708
35709- sk_isa_netdev_ops = tms380tr_netdev_ops;
35710- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35711- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35712+ pax_open_kernel();
35713+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35714+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35715+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35716+ pax_close_kernel();
35717
35718 err = platform_driver_register(&sk_isa_driver);
35719 if (err)
35720diff -urNp linux-2.6.32.48/drivers/net/tulip/de2104x.c linux-2.6.32.48/drivers/net/tulip/de2104x.c
35721--- linux-2.6.32.48/drivers/net/tulip/de2104x.c 2011-11-08 19:02:43.000000000 -0500
35722+++ linux-2.6.32.48/drivers/net/tulip/de2104x.c 2011-11-15 19:59:43.000000000 -0500
35723@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
35724 struct de_srom_info_leaf *il;
35725 void *bufp;
35726
35727+ pax_track_stack();
35728+
35729 /* download entire eeprom */
35730 for (i = 0; i < DE_EEPROM_WORDS; i++)
35731 ((__le16 *)ee_data)[i] =
35732diff -urNp linux-2.6.32.48/drivers/net/tulip/de4x5.c linux-2.6.32.48/drivers/net/tulip/de4x5.c
35733--- linux-2.6.32.48/drivers/net/tulip/de4x5.c 2011-11-08 19:02:43.000000000 -0500
35734+++ linux-2.6.32.48/drivers/net/tulip/de4x5.c 2011-11-15 19:59:43.000000000 -0500
35735@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
35736 for (i=0; i<ETH_ALEN; i++) {
35737 tmp.addr[i] = dev->dev_addr[i];
35738 }
35739- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35740+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35741 break;
35742
35743 case DE4X5_SET_HWADDR: /* Set the hardware address */
35744@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35745 spin_lock_irqsave(&lp->lock, flags);
35746 memcpy(&statbuf, &lp->pktStats, ioc->len);
35747 spin_unlock_irqrestore(&lp->lock, flags);
35748- if (copy_to_user(ioc->data, &statbuf, ioc->len))
35749+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35750 return -EFAULT;
35751 break;
35752 }
35753diff -urNp linux-2.6.32.48/drivers/net/usb/hso.c linux-2.6.32.48/drivers/net/usb/hso.c
35754--- linux-2.6.32.48/drivers/net/usb/hso.c 2011-11-08 19:02:43.000000000 -0500
35755+++ linux-2.6.32.48/drivers/net/usb/hso.c 2011-11-15 19:59:43.000000000 -0500
35756@@ -71,7 +71,7 @@
35757 #include <asm/byteorder.h>
35758 #include <linux/serial_core.h>
35759 #include <linux/serial.h>
35760-
35761+#include <asm/local.h>
35762
35763 #define DRIVER_VERSION "1.2"
35764 #define MOD_AUTHOR "Option Wireless"
35765@@ -258,7 +258,7 @@ struct hso_serial {
35766
35767 /* from usb_serial_port */
35768 struct tty_struct *tty;
35769- int open_count;
35770+ local_t open_count;
35771 spinlock_t serial_lock;
35772
35773 int (*write_data) (struct hso_serial *serial);
35774@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35775 struct urb *urb;
35776
35777 urb = serial->rx_urb[0];
35778- if (serial->open_count > 0) {
35779+ if (local_read(&serial->open_count) > 0) {
35780 count = put_rxbuf_data(urb, serial);
35781 if (count == -1)
35782 return;
35783@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35784 DUMP1(urb->transfer_buffer, urb->actual_length);
35785
35786 /* Anyone listening? */
35787- if (serial->open_count == 0)
35788+ if (local_read(&serial->open_count) == 0)
35789 return;
35790
35791 if (status == 0) {
35792@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35793 spin_unlock_irq(&serial->serial_lock);
35794
35795 /* check for port already opened, if not set the termios */
35796- serial->open_count++;
35797- if (serial->open_count == 1) {
35798+ if (local_inc_return(&serial->open_count) == 1) {
35799 tty->low_latency = 1;
35800 serial->rx_state = RX_IDLE;
35801 /* Force default termio settings */
35802@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35803 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35804 if (result) {
35805 hso_stop_serial_device(serial->parent);
35806- serial->open_count--;
35807+ local_dec(&serial->open_count);
35808 kref_put(&serial->parent->ref, hso_serial_ref_free);
35809 }
35810 } else {
35811@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35812
35813 /* reset the rts and dtr */
35814 /* do the actual close */
35815- serial->open_count--;
35816+ local_dec(&serial->open_count);
35817
35818- if (serial->open_count <= 0) {
35819- serial->open_count = 0;
35820+ if (local_read(&serial->open_count) <= 0) {
35821+ local_set(&serial->open_count, 0);
35822 spin_lock_irq(&serial->serial_lock);
35823 if (serial->tty == tty) {
35824 serial->tty->driver_data = NULL;
35825@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35826
35827 /* the actual setup */
35828 spin_lock_irqsave(&serial->serial_lock, flags);
35829- if (serial->open_count)
35830+ if (local_read(&serial->open_count))
35831 _hso_serial_set_termios(tty, old);
35832 else
35833 tty->termios = old;
35834@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35835 /* Start all serial ports */
35836 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35837 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35838- if (dev2ser(serial_table[i])->open_count) {
35839+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35840 result =
35841 hso_start_serial_device(serial_table[i], GFP_NOIO);
35842 hso_kick_transmit(dev2ser(serial_table[i]));
35843diff -urNp linux-2.6.32.48/drivers/net/vxge/vxge-config.h linux-2.6.32.48/drivers/net/vxge/vxge-config.h
35844--- linux-2.6.32.48/drivers/net/vxge/vxge-config.h 2011-11-08 19:02:43.000000000 -0500
35845+++ linux-2.6.32.48/drivers/net/vxge/vxge-config.h 2011-11-15 19:59:43.000000000 -0500
35846@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35847 void (*link_down)(struct __vxge_hw_device *devh);
35848 void (*crit_err)(struct __vxge_hw_device *devh,
35849 enum vxge_hw_event type, u64 ext_data);
35850-};
35851+} __no_const;
35852
35853 /*
35854 * struct __vxge_hw_blockpool_entry - Block private data structure
35855diff -urNp linux-2.6.32.48/drivers/net/vxge/vxge-main.c linux-2.6.32.48/drivers/net/vxge/vxge-main.c
35856--- linux-2.6.32.48/drivers/net/vxge/vxge-main.c 2011-11-08 19:02:43.000000000 -0500
35857+++ linux-2.6.32.48/drivers/net/vxge/vxge-main.c 2011-11-15 19:59:43.000000000 -0500
35858@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35859 struct sk_buff *completed[NR_SKB_COMPLETED];
35860 int more;
35861
35862+ pax_track_stack();
35863+
35864 do {
35865 more = 0;
35866 skb_ptr = completed;
35867@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35868 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35869 int index;
35870
35871+ pax_track_stack();
35872+
35873 /*
35874 * Filling
35875 * - itable with bucket numbers
35876diff -urNp linux-2.6.32.48/drivers/net/vxge/vxge-traffic.h linux-2.6.32.48/drivers/net/vxge/vxge-traffic.h
35877--- linux-2.6.32.48/drivers/net/vxge/vxge-traffic.h 2011-11-08 19:02:43.000000000 -0500
35878+++ linux-2.6.32.48/drivers/net/vxge/vxge-traffic.h 2011-11-15 19:59:43.000000000 -0500
35879@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35880 struct vxge_hw_mempool_dma *dma_object,
35881 u32 index,
35882 u32 is_last);
35883-};
35884+} __no_const;
35885
35886 void
35887 __vxge_hw_mempool_destroy(
35888diff -urNp linux-2.6.32.48/drivers/net/wan/cycx_x25.c linux-2.6.32.48/drivers/net/wan/cycx_x25.c
35889--- linux-2.6.32.48/drivers/net/wan/cycx_x25.c 2011-11-08 19:02:43.000000000 -0500
35890+++ linux-2.6.32.48/drivers/net/wan/cycx_x25.c 2011-11-15 19:59:43.000000000 -0500
35891@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35892 unsigned char hex[1024],
35893 * phex = hex;
35894
35895+ pax_track_stack();
35896+
35897 if (len >= (sizeof(hex) / 2))
35898 len = (sizeof(hex) / 2) - 1;
35899
35900diff -urNp linux-2.6.32.48/drivers/net/wan/hdlc_x25.c linux-2.6.32.48/drivers/net/wan/hdlc_x25.c
35901--- linux-2.6.32.48/drivers/net/wan/hdlc_x25.c 2011-11-08 19:02:43.000000000 -0500
35902+++ linux-2.6.32.48/drivers/net/wan/hdlc_x25.c 2011-11-15 19:59:43.000000000 -0500
35903@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35904
35905 static int x25_open(struct net_device *dev)
35906 {
35907- struct lapb_register_struct cb;
35908+ static struct lapb_register_struct cb = {
35909+ .connect_confirmation = x25_connected,
35910+ .connect_indication = x25_connected,
35911+ .disconnect_confirmation = x25_disconnected,
35912+ .disconnect_indication = x25_disconnected,
35913+ .data_indication = x25_data_indication,
35914+ .data_transmit = x25_data_transmit
35915+ };
35916 int result;
35917
35918- cb.connect_confirmation = x25_connected;
35919- cb.connect_indication = x25_connected;
35920- cb.disconnect_confirmation = x25_disconnected;
35921- cb.disconnect_indication = x25_disconnected;
35922- cb.data_indication = x25_data_indication;
35923- cb.data_transmit = x25_data_transmit;
35924-
35925 result = lapb_register(dev, &cb);
35926 if (result != LAPB_OK)
35927 return result;
35928diff -urNp linux-2.6.32.48/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.48/drivers/net/wimax/i2400m/usb-fw.c
35929--- linux-2.6.32.48/drivers/net/wimax/i2400m/usb-fw.c 2011-11-08 19:02:43.000000000 -0500
35930+++ linux-2.6.32.48/drivers/net/wimax/i2400m/usb-fw.c 2011-11-15 19:59:43.000000000 -0500
35931@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35932 int do_autopm = 1;
35933 DECLARE_COMPLETION_ONSTACK(notif_completion);
35934
35935+ pax_track_stack();
35936+
35937 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35938 i2400m, ack, ack_size);
35939 BUG_ON(_ack == i2400m->bm_ack_buf);
35940diff -urNp linux-2.6.32.48/drivers/net/wireless/airo.c linux-2.6.32.48/drivers/net/wireless/airo.c
35941--- linux-2.6.32.48/drivers/net/wireless/airo.c 2011-11-08 19:02:43.000000000 -0500
35942+++ linux-2.6.32.48/drivers/net/wireless/airo.c 2011-11-15 19:59:43.000000000 -0500
35943@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35944 BSSListElement * loop_net;
35945 BSSListElement * tmp_net;
35946
35947+ pax_track_stack();
35948+
35949 /* Blow away current list of scan results */
35950 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35951 list_move_tail (&loop_net->list, &ai->network_free_list);
35952@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35953 WepKeyRid wkr;
35954 int rc;
35955
35956+ pax_track_stack();
35957+
35958 memset( &mySsid, 0, sizeof( mySsid ) );
35959 kfree (ai->flash);
35960 ai->flash = NULL;
35961@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35962 __le32 *vals = stats.vals;
35963 int len;
35964
35965+ pax_track_stack();
35966+
35967 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35968 return -ENOMEM;
35969 data = (struct proc_data *)file->private_data;
35970@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35971 /* If doLoseSync is not 1, we won't do a Lose Sync */
35972 int doLoseSync = -1;
35973
35974+ pax_track_stack();
35975+
35976 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35977 return -ENOMEM;
35978 data = (struct proc_data *)file->private_data;
35979@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35980 int i;
35981 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35982
35983+ pax_track_stack();
35984+
35985 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35986 if (!qual)
35987 return -ENOMEM;
35988@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35989 CapabilityRid cap_rid;
35990 __le32 *vals = stats_rid.vals;
35991
35992+ pax_track_stack();
35993+
35994 /* Get stats out of the card */
35995 clear_bit(JOB_WSTATS, &local->jobs);
35996 if (local->power.event) {
35997diff -urNp linux-2.6.32.48/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.48/drivers/net/wireless/ath/ath5k/debug.c
35998--- linux-2.6.32.48/drivers/net/wireless/ath/ath5k/debug.c 2011-11-08 19:02:43.000000000 -0500
35999+++ linux-2.6.32.48/drivers/net/wireless/ath/ath5k/debug.c 2011-11-15 19:59:43.000000000 -0500
36000@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
36001 unsigned int v;
36002 u64 tsf;
36003
36004+ pax_track_stack();
36005+
36006 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
36007 len += snprintf(buf+len, sizeof(buf)-len,
36008 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
36009@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
36010 unsigned int len = 0;
36011 unsigned int i;
36012
36013+ pax_track_stack();
36014+
36015 len += snprintf(buf+len, sizeof(buf)-len,
36016 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
36017
36018diff -urNp linux-2.6.32.48/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.48/drivers/net/wireless/ath/ath9k/debug.c
36019--- linux-2.6.32.48/drivers/net/wireless/ath/ath9k/debug.c 2011-11-08 19:02:43.000000000 -0500
36020+++ linux-2.6.32.48/drivers/net/wireless/ath/ath9k/debug.c 2011-11-15 19:59:43.000000000 -0500
36021@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
36022 char buf[512];
36023 unsigned int len = 0;
36024
36025+ pax_track_stack();
36026+
36027 len += snprintf(buf + len, sizeof(buf) - len,
36028 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
36029 len += snprintf(buf + len, sizeof(buf) - len,
36030@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
36031 int i;
36032 u8 addr[ETH_ALEN];
36033
36034+ pax_track_stack();
36035+
36036 len += snprintf(buf + len, sizeof(buf) - len,
36037 "primary: %s (%s chan=%d ht=%d)\n",
36038 wiphy_name(sc->pri_wiphy->hw->wiphy),
36039diff -urNp linux-2.6.32.48/drivers/net/wireless/b43/debugfs.c linux-2.6.32.48/drivers/net/wireless/b43/debugfs.c
36040--- linux-2.6.32.48/drivers/net/wireless/b43/debugfs.c 2011-11-08 19:02:43.000000000 -0500
36041+++ linux-2.6.32.48/drivers/net/wireless/b43/debugfs.c 2011-11-15 19:59:43.000000000 -0500
36042@@ -43,7 +43,7 @@ static struct dentry *rootdir;
36043 struct b43_debugfs_fops {
36044 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
36045 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
36046- struct file_operations fops;
36047+ const struct file_operations fops;
36048 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
36049 size_t file_struct_offset;
36050 };
36051diff -urNp linux-2.6.32.48/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.48/drivers/net/wireless/b43legacy/debugfs.c
36052--- linux-2.6.32.48/drivers/net/wireless/b43legacy/debugfs.c 2011-11-08 19:02:43.000000000 -0500
36053+++ linux-2.6.32.48/drivers/net/wireless/b43legacy/debugfs.c 2011-11-15 19:59:43.000000000 -0500
36054@@ -44,7 +44,7 @@ static struct dentry *rootdir;
36055 struct b43legacy_debugfs_fops {
36056 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
36057 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
36058- struct file_operations fops;
36059+ const struct file_operations fops;
36060 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
36061 size_t file_struct_offset;
36062 /* Take wl->irq_lock before calling read/write? */
36063diff -urNp linux-2.6.32.48/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.48/drivers/net/wireless/ipw2x00/ipw2100.c
36064--- linux-2.6.32.48/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-08 19:02:43.000000000 -0500
36065+++ linux-2.6.32.48/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-15 19:59:43.000000000 -0500
36066@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
36067 int err;
36068 DECLARE_SSID_BUF(ssid);
36069
36070+ pax_track_stack();
36071+
36072 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
36073
36074 if (ssid_len)
36075@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
36076 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
36077 int err;
36078
36079+ pax_track_stack();
36080+
36081 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
36082 idx, keylen, len);
36083
36084diff -urNp linux-2.6.32.48/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.48/drivers/net/wireless/ipw2x00/libipw_rx.c
36085--- linux-2.6.32.48/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-08 19:02:43.000000000 -0500
36086+++ linux-2.6.32.48/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-15 19:59:43.000000000 -0500
36087@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
36088 unsigned long flags;
36089 DECLARE_SSID_BUF(ssid);
36090
36091+ pax_track_stack();
36092+
36093 LIBIPW_DEBUG_SCAN("'%s' (%pM"
36094 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
36095 print_ssid(ssid, info_element->data, info_element->len),
36096diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-1000.c
36097--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-11-08 19:02:43.000000000 -0500
36098+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-11-15 19:59:43.000000000 -0500
36099@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
36100 },
36101 };
36102
36103-static struct iwl_ops iwl1000_ops = {
36104+static const struct iwl_ops iwl1000_ops = {
36105 .ucode = &iwl5000_ucode,
36106 .lib = &iwl1000_lib,
36107 .hcmd = &iwl5000_hcmd,
36108diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl3945-base.c
36109--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-11-08 19:02:43.000000000 -0500
36110+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-11-15 19:59:43.000000000 -0500
36111@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
36112 */
36113 if (iwl3945_mod_params.disable_hw_scan) {
36114 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
36115- iwl3945_hw_ops.hw_scan = NULL;
36116+ pax_open_kernel();
36117+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
36118+ pax_close_kernel();
36119 }
36120
36121
36122diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-3945.c
36123--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-11-08 19:02:43.000000000 -0500
36124+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-11-15 19:59:43.000000000 -0500
36125@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
36126 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
36127 };
36128
36129-static struct iwl_ops iwl3945_ops = {
36130+static const struct iwl_ops iwl3945_ops = {
36131 .ucode = &iwl3945_ucode,
36132 .lib = &iwl3945_lib,
36133 .hcmd = &iwl3945_hcmd,
36134diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-4965.c
36135--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-11-08 19:02:43.000000000 -0500
36136+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-11-15 19:59:43.000000000 -0500
36137@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
36138 },
36139 };
36140
36141-static struct iwl_ops iwl4965_ops = {
36142+static const struct iwl_ops iwl4965_ops = {
36143 .ucode = &iwl4965_ucode,
36144 .lib = &iwl4965_lib,
36145 .hcmd = &iwl4965_hcmd,
36146diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-5000.c
36147--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-11-08 19:02:43.000000000 -0500
36148+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-11-15 19:59:43.000000000 -0500
36149@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
36150 },
36151 };
36152
36153-struct iwl_ops iwl5000_ops = {
36154+const struct iwl_ops iwl5000_ops = {
36155 .ucode = &iwl5000_ucode,
36156 .lib = &iwl5000_lib,
36157 .hcmd = &iwl5000_hcmd,
36158 .utils = &iwl5000_hcmd_utils,
36159 };
36160
36161-static struct iwl_ops iwl5150_ops = {
36162+static const struct iwl_ops iwl5150_ops = {
36163 .ucode = &iwl5000_ucode,
36164 .lib = &iwl5150_lib,
36165 .hcmd = &iwl5000_hcmd,
36166diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-6000.c
36167--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-11-08 19:02:43.000000000 -0500
36168+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-11-15 19:59:43.000000000 -0500
36169@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
36170 .calc_rssi = iwl5000_calc_rssi,
36171 };
36172
36173-static struct iwl_ops iwl6000_ops = {
36174+static const struct iwl_ops iwl6000_ops = {
36175 .ucode = &iwl5000_ucode,
36176 .lib = &iwl6000_lib,
36177 .hcmd = &iwl5000_hcmd,
36178diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn.c
36179--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-11-08 19:02:43.000000000 -0500
36180+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-11-15 19:59:43.000000000 -0500
36181@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
36182 if (iwl_debug_level & IWL_DL_INFO)
36183 dev_printk(KERN_DEBUG, &(pdev->dev),
36184 "Disabling hw_scan\n");
36185- iwl_hw_ops.hw_scan = NULL;
36186+ pax_open_kernel();
36187+ *(void **)&iwl_hw_ops.hw_scan = NULL;
36188+ pax_close_kernel();
36189 }
36190
36191 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
36192diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
36193--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-08 19:02:43.000000000 -0500
36194+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-15 19:59:43.000000000 -0500
36195@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
36196 u8 active_index = 0;
36197 s32 tpt = 0;
36198
36199+ pax_track_stack();
36200+
36201 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
36202
36203 if (!ieee80211_is_data(hdr->frame_control) ||
36204@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
36205 u8 valid_tx_ant = 0;
36206 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
36207
36208+ pax_track_stack();
36209+
36210 /* Override starting rate (index 0) if needed for debug purposes */
36211 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
36212
36213diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debugfs.c
36214--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-08 19:02:43.000000000 -0500
36215+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-15 19:59:43.000000000 -0500
36216@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
36217 int pos = 0;
36218 const size_t bufsz = sizeof(buf);
36219
36220+ pax_track_stack();
36221+
36222 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
36223 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
36224 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
36225@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
36226 const size_t bufsz = sizeof(buf);
36227 ssize_t ret;
36228
36229+ pax_track_stack();
36230+
36231 for (i = 0; i < AC_NUM; i++) {
36232 pos += scnprintf(buf + pos, bufsz - pos,
36233 "\tcw_min\tcw_max\taifsn\ttxop\n");
36234diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debug.h
36235--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-08 19:02:43.000000000 -0500
36236+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-15 19:59:43.000000000 -0500
36237@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
36238 #endif
36239
36240 #else
36241-#define IWL_DEBUG(__priv, level, fmt, args...)
36242-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
36243+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
36244+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
36245 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
36246 void *p, u32 len)
36247 {}
36248diff -urNp linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-dev.h
36249--- linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-11-08 19:02:43.000000000 -0500
36250+++ linux-2.6.32.48/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-11-15 19:59:43.000000000 -0500
36251@@ -68,7 +68,7 @@ struct iwl_tx_queue;
36252
36253 /* shared structures from iwl-5000.c */
36254 extern struct iwl_mod_params iwl50_mod_params;
36255-extern struct iwl_ops iwl5000_ops;
36256+extern const struct iwl_ops iwl5000_ops;
36257 extern struct iwl_ucode_ops iwl5000_ucode;
36258 extern struct iwl_lib_ops iwl5000_lib;
36259 extern struct iwl_hcmd_ops iwl5000_hcmd;
36260diff -urNp linux-2.6.32.48/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.48/drivers/net/wireless/iwmc3200wifi/debugfs.c
36261--- linux-2.6.32.48/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-08 19:02:43.000000000 -0500
36262+++ linux-2.6.32.48/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-15 19:59:43.000000000 -0500
36263@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
36264 int buf_len = 512;
36265 size_t len = 0;
36266
36267+ pax_track_stack();
36268+
36269 if (*ppos != 0)
36270 return 0;
36271 if (count < sizeof(buf))
36272diff -urNp linux-2.6.32.48/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.48/drivers/net/wireless/libertas/debugfs.c
36273--- linux-2.6.32.48/drivers/net/wireless/libertas/debugfs.c 2011-11-08 19:02:43.000000000 -0500
36274+++ linux-2.6.32.48/drivers/net/wireless/libertas/debugfs.c 2011-11-15 19:59:43.000000000 -0500
36275@@ -708,7 +708,7 @@ out_unlock:
36276 struct lbs_debugfs_files {
36277 const char *name;
36278 int perm;
36279- struct file_operations fops;
36280+ const struct file_operations fops;
36281 };
36282
36283 static const struct lbs_debugfs_files debugfs_files[] = {
36284diff -urNp linux-2.6.32.48/drivers/net/wireless/rndis_wlan.c linux-2.6.32.48/drivers/net/wireless/rndis_wlan.c
36285--- linux-2.6.32.48/drivers/net/wireless/rndis_wlan.c 2011-11-08 19:02:43.000000000 -0500
36286+++ linux-2.6.32.48/drivers/net/wireless/rndis_wlan.c 2011-11-15 19:59:43.000000000 -0500
36287@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
36288
36289 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
36290
36291- if (rts_threshold < 0 || rts_threshold > 2347)
36292+ if (rts_threshold > 2347)
36293 rts_threshold = 2347;
36294
36295 tmp = cpu_to_le32(rts_threshold);
36296diff -urNp linux-2.6.32.48/drivers/oprofile/buffer_sync.c linux-2.6.32.48/drivers/oprofile/buffer_sync.c
36297--- linux-2.6.32.48/drivers/oprofile/buffer_sync.c 2011-11-08 19:02:43.000000000 -0500
36298+++ linux-2.6.32.48/drivers/oprofile/buffer_sync.c 2011-11-15 19:59:43.000000000 -0500
36299@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
36300 if (cookie == NO_COOKIE)
36301 offset = pc;
36302 if (cookie == INVALID_COOKIE) {
36303- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36304+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36305 offset = pc;
36306 }
36307 if (cookie != last_cookie) {
36308@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
36309 /* add userspace sample */
36310
36311 if (!mm) {
36312- atomic_inc(&oprofile_stats.sample_lost_no_mm);
36313+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36314 return 0;
36315 }
36316
36317 cookie = lookup_dcookie(mm, s->eip, &offset);
36318
36319 if (cookie == INVALID_COOKIE) {
36320- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36321+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36322 return 0;
36323 }
36324
36325@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
36326 /* ignore backtraces if failed to add a sample */
36327 if (state == sb_bt_start) {
36328 state = sb_bt_ignore;
36329- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36330+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36331 }
36332 }
36333 release_mm(mm);
36334diff -urNp linux-2.6.32.48/drivers/oprofile/event_buffer.c linux-2.6.32.48/drivers/oprofile/event_buffer.c
36335--- linux-2.6.32.48/drivers/oprofile/event_buffer.c 2011-11-08 19:02:43.000000000 -0500
36336+++ linux-2.6.32.48/drivers/oprofile/event_buffer.c 2011-11-15 19:59:43.000000000 -0500
36337@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
36338 }
36339
36340 if (buffer_pos == buffer_size) {
36341- atomic_inc(&oprofile_stats.event_lost_overflow);
36342+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36343 return;
36344 }
36345
36346diff -urNp linux-2.6.32.48/drivers/oprofile/oprof.c linux-2.6.32.48/drivers/oprofile/oprof.c
36347--- linux-2.6.32.48/drivers/oprofile/oprof.c 2011-11-08 19:02:43.000000000 -0500
36348+++ linux-2.6.32.48/drivers/oprofile/oprof.c 2011-11-15 19:59:43.000000000 -0500
36349@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
36350 if (oprofile_ops.switch_events())
36351 return;
36352
36353- atomic_inc(&oprofile_stats.multiplex_counter);
36354+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36355 start_switch_worker();
36356 }
36357
36358diff -urNp linux-2.6.32.48/drivers/oprofile/oprofilefs.c linux-2.6.32.48/drivers/oprofile/oprofilefs.c
36359--- linux-2.6.32.48/drivers/oprofile/oprofilefs.c 2011-11-08 19:02:43.000000000 -0500
36360+++ linux-2.6.32.48/drivers/oprofile/oprofilefs.c 2011-11-15 19:59:43.000000000 -0500
36361@@ -187,7 +187,7 @@ static const struct file_operations atom
36362
36363
36364 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36365- char const *name, atomic_t *val)
36366+ char const *name, atomic_unchecked_t *val)
36367 {
36368 struct dentry *d = __oprofilefs_create_file(sb, root, name,
36369 &atomic_ro_fops, 0444);
36370diff -urNp linux-2.6.32.48/drivers/oprofile/oprofile_stats.c linux-2.6.32.48/drivers/oprofile/oprofile_stats.c
36371--- linux-2.6.32.48/drivers/oprofile/oprofile_stats.c 2011-11-08 19:02:43.000000000 -0500
36372+++ linux-2.6.32.48/drivers/oprofile/oprofile_stats.c 2011-11-15 19:59:43.000000000 -0500
36373@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36374 cpu_buf->sample_invalid_eip = 0;
36375 }
36376
36377- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36378- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36379- atomic_set(&oprofile_stats.event_lost_overflow, 0);
36380- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36381- atomic_set(&oprofile_stats.multiplex_counter, 0);
36382+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36383+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36384+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36385+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36386+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36387 }
36388
36389
36390diff -urNp linux-2.6.32.48/drivers/oprofile/oprofile_stats.h linux-2.6.32.48/drivers/oprofile/oprofile_stats.h
36391--- linux-2.6.32.48/drivers/oprofile/oprofile_stats.h 2011-11-08 19:02:43.000000000 -0500
36392+++ linux-2.6.32.48/drivers/oprofile/oprofile_stats.h 2011-11-15 19:59:43.000000000 -0500
36393@@ -13,11 +13,11 @@
36394 #include <asm/atomic.h>
36395
36396 struct oprofile_stat_struct {
36397- atomic_t sample_lost_no_mm;
36398- atomic_t sample_lost_no_mapping;
36399- atomic_t bt_lost_no_mapping;
36400- atomic_t event_lost_overflow;
36401- atomic_t multiplex_counter;
36402+ atomic_unchecked_t sample_lost_no_mm;
36403+ atomic_unchecked_t sample_lost_no_mapping;
36404+ atomic_unchecked_t bt_lost_no_mapping;
36405+ atomic_unchecked_t event_lost_overflow;
36406+ atomic_unchecked_t multiplex_counter;
36407 };
36408
36409 extern struct oprofile_stat_struct oprofile_stats;
36410diff -urNp linux-2.6.32.48/drivers/parisc/pdc_stable.c linux-2.6.32.48/drivers/parisc/pdc_stable.c
36411--- linux-2.6.32.48/drivers/parisc/pdc_stable.c 2011-11-08 19:02:43.000000000 -0500
36412+++ linux-2.6.32.48/drivers/parisc/pdc_stable.c 2011-11-15 19:59:43.000000000 -0500
36413@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
36414 return ret;
36415 }
36416
36417-static struct sysfs_ops pdcspath_attr_ops = {
36418+static const struct sysfs_ops pdcspath_attr_ops = {
36419 .show = pdcspath_attr_show,
36420 .store = pdcspath_attr_store,
36421 };
36422diff -urNp linux-2.6.32.48/drivers/parport/procfs.c linux-2.6.32.48/drivers/parport/procfs.c
36423--- linux-2.6.32.48/drivers/parport/procfs.c 2011-11-08 19:02:43.000000000 -0500
36424+++ linux-2.6.32.48/drivers/parport/procfs.c 2011-11-15 19:59:43.000000000 -0500
36425@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
36426
36427 *ppos += len;
36428
36429- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36430+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36431 }
36432
36433 #ifdef CONFIG_PARPORT_1284
36434@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
36435
36436 *ppos += len;
36437
36438- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36439+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36440 }
36441 #endif /* IEEE1284.3 support. */
36442
36443diff -urNp linux-2.6.32.48/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.48/drivers/pci/hotplug/acpiphp_glue.c
36444--- linux-2.6.32.48/drivers/pci/hotplug/acpiphp_glue.c 2011-11-08 19:02:43.000000000 -0500
36445+++ linux-2.6.32.48/drivers/pci/hotplug/acpiphp_glue.c 2011-11-15 19:59:43.000000000 -0500
36446@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
36447 }
36448
36449
36450-static struct acpi_dock_ops acpiphp_dock_ops = {
36451+static const struct acpi_dock_ops acpiphp_dock_ops = {
36452 .handler = handle_hotplug_event_func,
36453 };
36454
36455diff -urNp linux-2.6.32.48/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.48/drivers/pci/hotplug/cpci_hotplug.h
36456--- linux-2.6.32.48/drivers/pci/hotplug/cpci_hotplug.h 2011-11-08 19:02:43.000000000 -0500
36457+++ linux-2.6.32.48/drivers/pci/hotplug/cpci_hotplug.h 2011-11-15 19:59:43.000000000 -0500
36458@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36459 int (*hardware_test) (struct slot* slot, u32 value);
36460 u8 (*get_power) (struct slot* slot);
36461 int (*set_power) (struct slot* slot, int value);
36462-};
36463+} __no_const;
36464
36465 struct cpci_hp_controller {
36466 unsigned int irq;
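The cpci_hotplug.h change is an instance of __no_const: the constify gcc plugin used by this patch treats structures made up of function pointers as read-only after init, and __no_const opts out the few ops tables that drivers legitimately fill in at run time (the same annotation appears later for aacraid, bfa, ips, bq27x00 and qla2xxx). A rough illustration, assuming unannotated ops structures end up in read-only memory:

    /* Illustrative only: an ops table assigned at run time cannot be auto-constified. */
    struct cpci_ops_example {
        int (*set_power)(int slot, int value);
    } /* __no_const */;

    static int generic_set_power(int slot, int value) { return 0; }

    static struct cpci_ops_example ops;         /* filled in by the controller driver */

    static void controller_register(void)
    {
        ops.set_power = generic_set_power;      /* would fault if the struct were made const */
    }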
36467diff -urNp linux-2.6.32.48/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.48/drivers/pci/hotplug/cpqphp_nvram.c
36468--- linux-2.6.32.48/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-08 19:02:43.000000000 -0500
36469+++ linux-2.6.32.48/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-15 19:59:43.000000000 -0500
36470@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
36471
36472 void compaq_nvram_init (void __iomem *rom_start)
36473 {
36474+
36475+#ifndef CONFIG_PAX_KERNEXEC
36476 if (rom_start) {
36477 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36478 }
36479+#endif
36480+
36481 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36482
36483 /* initialize our int15 lock */
36484diff -urNp linux-2.6.32.48/drivers/pci/hotplug/fakephp.c linux-2.6.32.48/drivers/pci/hotplug/fakephp.c
36485--- linux-2.6.32.48/drivers/pci/hotplug/fakephp.c 2011-11-08 19:02:43.000000000 -0500
36486+++ linux-2.6.32.48/drivers/pci/hotplug/fakephp.c 2011-11-15 19:59:43.000000000 -0500
36487@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
36488 }
36489
36490 static struct kobj_type legacy_ktype = {
36491- .sysfs_ops = &(struct sysfs_ops){
36492+ .sysfs_ops = &(const struct sysfs_ops){
36493 .store = legacy_store, .show = legacy_show
36494 },
36495 .release = &legacy_release,
36496diff -urNp linux-2.6.32.48/drivers/pci/intel-iommu.c linux-2.6.32.48/drivers/pci/intel-iommu.c
36497--- linux-2.6.32.48/drivers/pci/intel-iommu.c 2011-11-08 19:02:43.000000000 -0500
36498+++ linux-2.6.32.48/drivers/pci/intel-iommu.c 2011-11-15 19:59:43.000000000 -0500
36499@@ -2643,7 +2643,7 @@ error:
36500 return 0;
36501 }
36502
36503-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
36504+dma_addr_t intel_map_page(struct device *dev, struct page *page,
36505 unsigned long offset, size_t size,
36506 enum dma_data_direction dir,
36507 struct dma_attrs *attrs)
36508@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
36509 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
36510 }
36511
36512-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36513+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36514 size_t size, enum dma_data_direction dir,
36515 struct dma_attrs *attrs)
36516 {
36517@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
36518 }
36519 }
36520
36521-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
36522+void *intel_alloc_coherent(struct device *hwdev, size_t size,
36523 dma_addr_t *dma_handle, gfp_t flags)
36524 {
36525 void *vaddr;
36526@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
36527 return NULL;
36528 }
36529
36530-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36531+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36532 dma_addr_t dma_handle)
36533 {
36534 int order;
36535@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
36536 free_pages((unsigned long)vaddr, order);
36537 }
36538
36539-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36540+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36541 int nelems, enum dma_data_direction dir,
36542 struct dma_attrs *attrs)
36543 {
36544@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
36545 return nelems;
36546 }
36547
36548-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36549+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36550 enum dma_data_direction dir, struct dma_attrs *attrs)
36551 {
36552 int i;
36553@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
36554 return nelems;
36555 }
36556
36557-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36558+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36559 {
36560 return !dma_addr;
36561 }
36562
36563-struct dma_map_ops intel_dma_ops = {
36564+const struct dma_map_ops intel_dma_ops = {
36565 .alloc_coherent = intel_alloc_coherent,
36566 .free_coherent = intel_free_coherent,
36567 .map_sg = intel_map_sg,
36568diff -urNp linux-2.6.32.48/drivers/pci/pcie/aspm.c linux-2.6.32.48/drivers/pci/pcie/aspm.c
36569--- linux-2.6.32.48/drivers/pci/pcie/aspm.c 2011-11-08 19:02:43.000000000 -0500
36570+++ linux-2.6.32.48/drivers/pci/pcie/aspm.c 2011-11-15 19:59:43.000000000 -0500
36571@@ -27,9 +27,9 @@
36572 #define MODULE_PARAM_PREFIX "pcie_aspm."
36573
36574 /* Note: those are not register definitions */
36575-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36576-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36577-#define ASPM_STATE_L1 (4) /* L1 state */
36578+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36579+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36580+#define ASPM_STATE_L1 (4U) /* L1 state */
36581 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36582 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36583
36584diff -urNp linux-2.6.32.48/drivers/pci/probe.c linux-2.6.32.48/drivers/pci/probe.c
36585--- linux-2.6.32.48/drivers/pci/probe.c 2011-11-08 19:02:43.000000000 -0500
36586+++ linux-2.6.32.48/drivers/pci/probe.c 2011-11-15 19:59:43.000000000 -0500
36587@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
36588 return ret;
36589 }
36590
36591-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
36592+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
36593 struct device_attribute *attr,
36594 char *buf)
36595 {
36596 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
36597 }
36598
36599-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
36600+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
36601 struct device_attribute *attr,
36602 char *buf)
36603 {
36604diff -urNp linux-2.6.32.48/drivers/pci/proc.c linux-2.6.32.48/drivers/pci/proc.c
36605--- linux-2.6.32.48/drivers/pci/proc.c 2011-11-08 19:02:43.000000000 -0500
36606+++ linux-2.6.32.48/drivers/pci/proc.c 2011-11-15 19:59:43.000000000 -0500
36607@@ -480,7 +480,16 @@ static const struct file_operations proc
36608 static int __init pci_proc_init(void)
36609 {
36610 struct pci_dev *dev = NULL;
36611+
36612+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36613+#ifdef CONFIG_GRKERNSEC_PROC_USER
36614+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36615+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36616+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36617+#endif
36618+#else
36619 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36620+#endif
36621 proc_create("devices", 0, proc_bus_pci_dir,
36622 &proc_bus_pci_dev_operations);
36623 proc_initialized = 1;
36624diff -urNp linux-2.6.32.48/drivers/pci/slot.c linux-2.6.32.48/drivers/pci/slot.c
36625--- linux-2.6.32.48/drivers/pci/slot.c 2011-11-08 19:02:43.000000000 -0500
36626+++ linux-2.6.32.48/drivers/pci/slot.c 2011-11-15 19:59:43.000000000 -0500
36627@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
36628 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
36629 }
36630
36631-static struct sysfs_ops pci_slot_sysfs_ops = {
36632+static const struct sysfs_ops pci_slot_sysfs_ops = {
36633 .show = pci_slot_attr_show,
36634 .store = pci_slot_attr_store,
36635 };
36636diff -urNp linux-2.6.32.48/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.48/drivers/pcmcia/pcmcia_ioctl.c
36637--- linux-2.6.32.48/drivers/pcmcia/pcmcia_ioctl.c 2011-11-08 19:02:43.000000000 -0500
36638+++ linux-2.6.32.48/drivers/pcmcia/pcmcia_ioctl.c 2011-11-15 19:59:43.000000000 -0500
36639@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
36640 return -EFAULT;
36641 }
36642 }
36643- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36644+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36645 if (!buf)
36646 return -ENOMEM;
36647
36648diff -urNp linux-2.6.32.48/drivers/platform/x86/acer-wmi.c linux-2.6.32.48/drivers/platform/x86/acer-wmi.c
36649--- linux-2.6.32.48/drivers/platform/x86/acer-wmi.c 2011-11-08 19:02:43.000000000 -0500
36650+++ linux-2.6.32.48/drivers/platform/x86/acer-wmi.c 2011-11-15 19:59:43.000000000 -0500
36651@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
36652 return 0;
36653 }
36654
36655-static struct backlight_ops acer_bl_ops = {
36656+static const struct backlight_ops acer_bl_ops = {
36657 .get_brightness = read_brightness,
36658 .update_status = update_bl_status,
36659 };
36660diff -urNp linux-2.6.32.48/drivers/platform/x86/asus_acpi.c linux-2.6.32.48/drivers/platform/x86/asus_acpi.c
36661--- linux-2.6.32.48/drivers/platform/x86/asus_acpi.c 2011-11-08 19:02:43.000000000 -0500
36662+++ linux-2.6.32.48/drivers/platform/x86/asus_acpi.c 2011-11-15 19:59:43.000000000 -0500
36663@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
36664 return 0;
36665 }
36666
36667-static struct backlight_ops asus_backlight_data = {
36668+static const struct backlight_ops asus_backlight_data = {
36669 .get_brightness = read_brightness,
36670 .update_status = set_brightness_status,
36671 };
36672diff -urNp linux-2.6.32.48/drivers/platform/x86/asus-laptop.c linux-2.6.32.48/drivers/platform/x86/asus-laptop.c
36673--- linux-2.6.32.48/drivers/platform/x86/asus-laptop.c 2011-11-08 19:02:43.000000000 -0500
36674+++ linux-2.6.32.48/drivers/platform/x86/asus-laptop.c 2011-11-15 19:59:43.000000000 -0500
36675@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
36676 */
36677 static int read_brightness(struct backlight_device *bd);
36678 static int update_bl_status(struct backlight_device *bd);
36679-static struct backlight_ops asusbl_ops = {
36680+static const struct backlight_ops asusbl_ops = {
36681 .get_brightness = read_brightness,
36682 .update_status = update_bl_status,
36683 };
36684diff -urNp linux-2.6.32.48/drivers/platform/x86/compal-laptop.c linux-2.6.32.48/drivers/platform/x86/compal-laptop.c
36685--- linux-2.6.32.48/drivers/platform/x86/compal-laptop.c 2011-11-08 19:02:43.000000000 -0500
36686+++ linux-2.6.32.48/drivers/platform/x86/compal-laptop.c 2011-11-15 19:59:43.000000000 -0500
36687@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
36688 return set_lcd_level(b->props.brightness);
36689 }
36690
36691-static struct backlight_ops compalbl_ops = {
36692+static const struct backlight_ops compalbl_ops = {
36693 .get_brightness = bl_get_brightness,
36694 .update_status = bl_update_status,
36695 };
36696diff -urNp linux-2.6.32.48/drivers/platform/x86/dell-laptop.c linux-2.6.32.48/drivers/platform/x86/dell-laptop.c
36697--- linux-2.6.32.48/drivers/platform/x86/dell-laptop.c 2011-11-08 19:02:43.000000000 -0500
36698+++ linux-2.6.32.48/drivers/platform/x86/dell-laptop.c 2011-11-15 19:59:43.000000000 -0500
36699@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
36700 return buffer.output[1];
36701 }
36702
36703-static struct backlight_ops dell_ops = {
36704+static const struct backlight_ops dell_ops = {
36705 .get_brightness = dell_get_intensity,
36706 .update_status = dell_send_intensity,
36707 };
36708diff -urNp linux-2.6.32.48/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.48/drivers/platform/x86/eeepc-laptop.c
36709--- linux-2.6.32.48/drivers/platform/x86/eeepc-laptop.c 2011-11-08 19:02:43.000000000 -0500
36710+++ linux-2.6.32.48/drivers/platform/x86/eeepc-laptop.c 2011-11-15 19:59:43.000000000 -0500
36711@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
36712 */
36713 static int read_brightness(struct backlight_device *bd);
36714 static int update_bl_status(struct backlight_device *bd);
36715-static struct backlight_ops eeepcbl_ops = {
36716+static const struct backlight_ops eeepcbl_ops = {
36717 .get_brightness = read_brightness,
36718 .update_status = update_bl_status,
36719 };
36720diff -urNp linux-2.6.32.48/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.48/drivers/platform/x86/fujitsu-laptop.c
36721--- linux-2.6.32.48/drivers/platform/x86/fujitsu-laptop.c 2011-11-08 19:02:43.000000000 -0500
36722+++ linux-2.6.32.48/drivers/platform/x86/fujitsu-laptop.c 2011-11-15 19:59:43.000000000 -0500
36723@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
36724 return ret;
36725 }
36726
36727-static struct backlight_ops fujitsubl_ops = {
36728+static const struct backlight_ops fujitsubl_ops = {
36729 .get_brightness = bl_get_brightness,
36730 .update_status = bl_update_status,
36731 };
36732diff -urNp linux-2.6.32.48/drivers/platform/x86/msi-laptop.c linux-2.6.32.48/drivers/platform/x86/msi-laptop.c
36733--- linux-2.6.32.48/drivers/platform/x86/msi-laptop.c 2011-11-08 19:02:43.000000000 -0500
36734+++ linux-2.6.32.48/drivers/platform/x86/msi-laptop.c 2011-11-15 19:59:43.000000000 -0500
36735@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
36736 return set_lcd_level(b->props.brightness);
36737 }
36738
36739-static struct backlight_ops msibl_ops = {
36740+static const struct backlight_ops msibl_ops = {
36741 .get_brightness = bl_get_brightness,
36742 .update_status = bl_update_status,
36743 };
36744diff -urNp linux-2.6.32.48/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.48/drivers/platform/x86/panasonic-laptop.c
36745--- linux-2.6.32.48/drivers/platform/x86/panasonic-laptop.c 2011-11-08 19:02:43.000000000 -0500
36746+++ linux-2.6.32.48/drivers/platform/x86/panasonic-laptop.c 2011-11-15 19:59:43.000000000 -0500
36747@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36748 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36749 }
36750
36751-static struct backlight_ops pcc_backlight_ops = {
36752+static const struct backlight_ops pcc_backlight_ops = {
36753 .get_brightness = bl_get,
36754 .update_status = bl_set_status,
36755 };
36756diff -urNp linux-2.6.32.48/drivers/platform/x86/sony-laptop.c linux-2.6.32.48/drivers/platform/x86/sony-laptop.c
36757--- linux-2.6.32.48/drivers/platform/x86/sony-laptop.c 2011-11-08 19:02:43.000000000 -0500
36758+++ linux-2.6.32.48/drivers/platform/x86/sony-laptop.c 2011-11-15 19:59:43.000000000 -0500
36759@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36760 }
36761
36762 static struct backlight_device *sony_backlight_device;
36763-static struct backlight_ops sony_backlight_ops = {
36764+static const struct backlight_ops sony_backlight_ops = {
36765 .update_status = sony_backlight_update_status,
36766 .get_brightness = sony_backlight_get_brightness,
36767 };
36768diff -urNp linux-2.6.32.48/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.48/drivers/platform/x86/thinkpad_acpi.c
36769--- linux-2.6.32.48/drivers/platform/x86/thinkpad_acpi.c 2011-11-08 19:02:43.000000000 -0500
36770+++ linux-2.6.32.48/drivers/platform/x86/thinkpad_acpi.c 2011-11-15 19:59:43.000000000 -0500
36771@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
36772 return 0;
36773 }
36774
36775-void static hotkey_mask_warn_incomplete_mask(void)
36776+static void hotkey_mask_warn_incomplete_mask(void)
36777 {
36778 /* log only what the user can fix... */
36779 const u32 wantedmask = hotkey_driver_mask &
36780@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_cha
36781 BACKLIGHT_UPDATE_HOTKEY);
36782 }
36783
36784-static struct backlight_ops ibm_backlight_data = {
36785+static const struct backlight_ops ibm_backlight_data = {
36786 .get_brightness = brightness_get,
36787 .update_status = brightness_update_status,
36788 };
36789diff -urNp linux-2.6.32.48/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.48/drivers/platform/x86/toshiba_acpi.c
36790--- linux-2.6.32.48/drivers/platform/x86/toshiba_acpi.c 2011-11-08 19:02:43.000000000 -0500
36791+++ linux-2.6.32.48/drivers/platform/x86/toshiba_acpi.c 2011-11-15 19:59:43.000000000 -0500
36792@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36793 return AE_OK;
36794 }
36795
36796-static struct backlight_ops toshiba_backlight_data = {
36797+static const struct backlight_ops toshiba_backlight_data = {
36798 .get_brightness = get_lcd,
36799 .update_status = set_lcd_status,
36800 };
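The run of platform/x86 hunks above (acer-wmi through toshiba_acpi) all make the same change: each driver's backlight_ops table becomes const, in line with the patch-wide constification of ops structures that are only ever read after initialisation, which lets them live in read-only memory. In miniature:

    /* An ops table that is never modified after boot can be declared const. */
    struct backlight_ops_example {
        int (*get_brightness)(void);
        int (*update_status)(void);
    };

    static int example_get_brightness(void) { return 7; }
    static int example_update_status(void)  { return 0; }

    static const struct backlight_ops_example example_bl_ops = {
        .get_brightness = example_get_brightness,
        .update_status  = example_update_status,
    };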
36801diff -urNp linux-2.6.32.48/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.48/drivers/pnp/pnpbios/bioscalls.c
36802--- linux-2.6.32.48/drivers/pnp/pnpbios/bioscalls.c 2011-11-08 19:02:43.000000000 -0500
36803+++ linux-2.6.32.48/drivers/pnp/pnpbios/bioscalls.c 2011-11-15 19:59:43.000000000 -0500
36804@@ -60,7 +60,7 @@ do { \
36805 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36806 } while(0)
36807
36808-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36809+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36810 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36811
36812 /*
36813@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36814
36815 cpu = get_cpu();
36816 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36817+
36818+ pax_open_kernel();
36819 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36820+ pax_close_kernel();
36821
36822 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36823 spin_lock_irqsave(&pnp_bios_lock, flags);
36824@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36825 :"memory");
36826 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36827
36828+ pax_open_kernel();
36829 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36830+ pax_close_kernel();
36831+
36832 put_cpu();
36833
36834 /* If we get here and this is set then the PnP BIOS faulted on us. */
36835@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36836 return status;
36837 }
36838
36839-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36840+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36841 {
36842 int i;
36843
36844@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36845 pnp_bios_callpoint.offset = header->fields.pm16offset;
36846 pnp_bios_callpoint.segment = PNP_CS16;
36847
36848+ pax_open_kernel();
36849+
36850 for_each_possible_cpu(i) {
36851 struct desc_struct *gdt = get_cpu_gdt_table(i);
36852 if (!gdt)
36853@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36854 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36855 (unsigned long)__va(header->fields.pm16dseg));
36856 }
36857+
36858+ pax_close_kernel();
36859 }
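The bioscalls.c hunks wrap every write to the per-CPU GDT in pax_open_kernel()/pax_close_kernel(): under KERNEXEC the descriptor tables sit in read-only memory, so the writable window is opened only around the assignment and closed again immediately. The shape of the pattern, with the PaX helpers treated as opaque:

    /* Open a write window, patch the read-only descriptor, close the window. */
    static void install_bios_descriptor(struct desc_struct *gdt, struct desc_struct desc)
    {
        pax_open_kernel();              /* temporarily allow writes to RO kernel data */
        gdt[0x40 / 8] = desc;
        pax_close_kernel();             /* restore the read-only protection */
    }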
36860diff -urNp linux-2.6.32.48/drivers/pnp/resource.c linux-2.6.32.48/drivers/pnp/resource.c
36861--- linux-2.6.32.48/drivers/pnp/resource.c 2011-11-08 19:02:43.000000000 -0500
36862+++ linux-2.6.32.48/drivers/pnp/resource.c 2011-11-15 19:59:43.000000000 -0500
36863@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36864 return 1;
36865
36866 /* check if the resource is valid */
36867- if (*irq < 0 || *irq > 15)
36868+ if (*irq > 15)
36869 return 0;
36870
36871 /* check if the resource is reserved */
36872@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36873 return 1;
36874
36875 /* check if the resource is valid */
36876- if (*dma < 0 || *dma == 4 || *dma > 7)
36877+ if (*dma == 4 || *dma > 7)
36878 return 0;
36879
36880 /* check if the resource is reserved */
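Both pnp/resource.c hunks drop the `*irq < 0` / `*dma < 0` half of a range check: the value being tested is unsigned, so that comparison can never be true and only the upper-bound test is kept. The surviving logic, reduced to its essentials:

    /* An unsigned IRQ number only needs an upper-bound check. */
    static int pnp_irq_in_range(unsigned long irq)
    {
        return irq <= 15;
    }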
36881diff -urNp linux-2.6.32.48/drivers/power/bq27x00_battery.c linux-2.6.32.48/drivers/power/bq27x00_battery.c
36882--- linux-2.6.32.48/drivers/power/bq27x00_battery.c 2011-11-08 19:02:43.000000000 -0500
36883+++ linux-2.6.32.48/drivers/power/bq27x00_battery.c 2011-11-15 19:59:43.000000000 -0500
36884@@ -44,7 +44,7 @@ struct bq27x00_device_info;
36885 struct bq27x00_access_methods {
36886 int (*read)(u8 reg, int *rt_value, int b_single,
36887 struct bq27x00_device_info *di);
36888-};
36889+} __no_const;
36890
36891 struct bq27x00_device_info {
36892 struct device *dev;
36893diff -urNp linux-2.6.32.48/drivers/rtc/rtc-dev.c linux-2.6.32.48/drivers/rtc/rtc-dev.c
36894--- linux-2.6.32.48/drivers/rtc/rtc-dev.c 2011-11-08 19:02:43.000000000 -0500
36895+++ linux-2.6.32.48/drivers/rtc/rtc-dev.c 2011-11-15 19:59:43.000000000 -0500
36896@@ -14,6 +14,7 @@
36897 #include <linux/module.h>
36898 #include <linux/rtc.h>
36899 #include <linux/sched.h>
36900+#include <linux/grsecurity.h>
36901 #include "rtc-core.h"
36902
36903 static dev_t rtc_devt;
36904@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36905 if (copy_from_user(&tm, uarg, sizeof(tm)))
36906 return -EFAULT;
36907
36908+ gr_log_timechange();
36909+
36910 return rtc_set_time(rtc, &tm);
36911
36912 case RTC_PIE_ON:
36913diff -urNp linux-2.6.32.48/drivers/s390/cio/qdio_perf.c linux-2.6.32.48/drivers/s390/cio/qdio_perf.c
36914--- linux-2.6.32.48/drivers/s390/cio/qdio_perf.c 2011-11-08 19:02:43.000000000 -0500
36915+++ linux-2.6.32.48/drivers/s390/cio/qdio_perf.c 2011-11-15 19:59:43.000000000 -0500
36916@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36917 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36918 {
36919 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36920- (long)atomic_long_read(&perf_stats.qdio_int));
36921+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36922 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36923- (long)atomic_long_read(&perf_stats.pci_int));
36924+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36925 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36926- (long)atomic_long_read(&perf_stats.thin_int));
36927+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36928 seq_printf(m, "\n");
36929 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36930- (long)atomic_long_read(&perf_stats.tasklet_inbound));
36931+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36932 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36933- (long)atomic_long_read(&perf_stats.tasklet_outbound));
36934+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36935 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36936- (long)atomic_long_read(&perf_stats.tasklet_thinint),
36937- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36938+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36939+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36940 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36941- (long)atomic_long_read(&perf_stats.thinint_inbound),
36942- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36943+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36944+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36945 seq_printf(m, "\n");
36946 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36947- (long)atomic_long_read(&perf_stats.siga_in));
36948+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36949 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36950- (long)atomic_long_read(&perf_stats.siga_out));
36951+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36952 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36953- (long)atomic_long_read(&perf_stats.siga_sync));
36954+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36955 seq_printf(m, "\n");
36956 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36957- (long)atomic_long_read(&perf_stats.inbound_handler));
36958+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36959 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36960- (long)atomic_long_read(&perf_stats.outbound_handler));
36961+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36962 seq_printf(m, "\n");
36963 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36964- (long)atomic_long_read(&perf_stats.fast_requeue));
36965+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36966 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36967- (long)atomic_long_read(&perf_stats.outbound_target_full));
36968+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36969 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36970- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36971+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36972 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36973- (long)atomic_long_read(&perf_stats.debug_stop_polling));
36974+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36975 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36976- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36977+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36978 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36979- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36980- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36981+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36982+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36983 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36984- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36985- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36986+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36987+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36988 seq_printf(m, "\n");
36989 return 0;
36990 }
36991diff -urNp linux-2.6.32.48/drivers/s390/cio/qdio_perf.h linux-2.6.32.48/drivers/s390/cio/qdio_perf.h
36992--- linux-2.6.32.48/drivers/s390/cio/qdio_perf.h 2011-11-08 19:02:43.000000000 -0500
36993+++ linux-2.6.32.48/drivers/s390/cio/qdio_perf.h 2011-11-15 19:59:43.000000000 -0500
36994@@ -13,46 +13,46 @@
36995
36996 struct qdio_perf_stats {
36997 /* interrupt handler calls */
36998- atomic_long_t qdio_int;
36999- atomic_long_t pci_int;
37000- atomic_long_t thin_int;
37001+ atomic_long_unchecked_t qdio_int;
37002+ atomic_long_unchecked_t pci_int;
37003+ atomic_long_unchecked_t thin_int;
37004
37005 /* tasklet runs */
37006- atomic_long_t tasklet_inbound;
37007- atomic_long_t tasklet_outbound;
37008- atomic_long_t tasklet_thinint;
37009- atomic_long_t tasklet_thinint_loop;
37010- atomic_long_t thinint_inbound;
37011- atomic_long_t thinint_inbound_loop;
37012- atomic_long_t thinint_inbound_loop2;
37013+ atomic_long_unchecked_t tasklet_inbound;
37014+ atomic_long_unchecked_t tasklet_outbound;
37015+ atomic_long_unchecked_t tasklet_thinint;
37016+ atomic_long_unchecked_t tasklet_thinint_loop;
37017+ atomic_long_unchecked_t thinint_inbound;
37018+ atomic_long_unchecked_t thinint_inbound_loop;
37019+ atomic_long_unchecked_t thinint_inbound_loop2;
37020
37021 /* signal adapter calls */
37022- atomic_long_t siga_out;
37023- atomic_long_t siga_in;
37024- atomic_long_t siga_sync;
37025+ atomic_long_unchecked_t siga_out;
37026+ atomic_long_unchecked_t siga_in;
37027+ atomic_long_unchecked_t siga_sync;
37028
37029 /* misc */
37030- atomic_long_t inbound_handler;
37031- atomic_long_t outbound_handler;
37032- atomic_long_t fast_requeue;
37033- atomic_long_t outbound_target_full;
37034+ atomic_long_unchecked_t inbound_handler;
37035+ atomic_long_unchecked_t outbound_handler;
37036+ atomic_long_unchecked_t fast_requeue;
37037+ atomic_long_unchecked_t outbound_target_full;
37038
37039 /* for debugging */
37040- atomic_long_t debug_tl_out_timer;
37041- atomic_long_t debug_stop_polling;
37042- atomic_long_t debug_eqbs_all;
37043- atomic_long_t debug_eqbs_incomplete;
37044- atomic_long_t debug_sqbs_all;
37045- atomic_long_t debug_sqbs_incomplete;
37046+ atomic_long_unchecked_t debug_tl_out_timer;
37047+ atomic_long_unchecked_t debug_stop_polling;
37048+ atomic_long_unchecked_t debug_eqbs_all;
37049+ atomic_long_unchecked_t debug_eqbs_incomplete;
37050+ atomic_long_unchecked_t debug_sqbs_all;
37051+ atomic_long_unchecked_t debug_sqbs_incomplete;
37052 };
37053
37054 extern struct qdio_perf_stats perf_stats;
37055 extern int qdio_performance_stats;
37056
37057-static inline void qdio_perf_stat_inc(atomic_long_t *count)
37058+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
37059 {
37060 if (qdio_performance_stats)
37061- atomic_long_inc(count);
37062+ atomic_long_inc_unchecked(count);
37063 }
37064
37065 int qdio_setup_perf_stats(void);
37066diff -urNp linux-2.6.32.48/drivers/scsi/aacraid/aacraid.h linux-2.6.32.48/drivers/scsi/aacraid/aacraid.h
37067--- linux-2.6.32.48/drivers/scsi/aacraid/aacraid.h 2011-11-08 19:02:43.000000000 -0500
37068+++ linux-2.6.32.48/drivers/scsi/aacraid/aacraid.h 2011-11-15 19:59:43.000000000 -0500
37069@@ -471,7 +471,7 @@ struct adapter_ops
37070 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
37071 /* Administrative operations */
37072 int (*adapter_comm)(struct aac_dev * dev, int comm);
37073-};
37074+} __no_const;
37075
37076 /*
37077 * Define which interrupt handler needs to be installed
37078diff -urNp linux-2.6.32.48/drivers/scsi/aacraid/commctrl.c linux-2.6.32.48/drivers/scsi/aacraid/commctrl.c
37079--- linux-2.6.32.48/drivers/scsi/aacraid/commctrl.c 2011-11-08 19:02:43.000000000 -0500
37080+++ linux-2.6.32.48/drivers/scsi/aacraid/commctrl.c 2011-11-15 19:59:43.000000000 -0500
37081@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
37082 u32 actual_fibsize64, actual_fibsize = 0;
37083 int i;
37084
37085+ pax_track_stack();
37086
37087 if (dev->in_reset) {
37088 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
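The commctrl.c hunk adds pax_track_stack() at the top of a function with a large stack footprint; the call lets PaX's stack-usage tracking account for deep frames, and the same one-line addition recurs in the SCSI drivers that follow (BusLogic, dpt_i2o, eata, fcoe, gdth, lpfc, megaraid, osd). A stand-in for how such a function looks once annotated (names illustrative, pax_track_stack() itself comes from the patch):

    static int example_deep_stack_ioctl(void __user *arg)
    {
        char big_buffer[1024];                  /* large on-stack object */

        pax_track_stack();                      /* record this frame's stack depth */
        memset(big_buffer, 0, sizeof(big_buffer));
        return 0;
    }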
37089diff -urNp linux-2.6.32.48/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.48/drivers/scsi/aic94xx/aic94xx_init.c
37090--- linux-2.6.32.48/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-08 19:02:43.000000000 -0500
37091+++ linux-2.6.32.48/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-15 19:59:43.000000000 -0500
37092@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
37093 flash_error_table[i].reason);
37094 }
37095
37096-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
37097+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
37098 asd_show_update_bios, asd_store_update_bios);
37099
37100 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
37101diff -urNp linux-2.6.32.48/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.48/drivers/scsi/bfa/bfa_iocfc.h
37102--- linux-2.6.32.48/drivers/scsi/bfa/bfa_iocfc.h 2011-11-08 19:02:43.000000000 -0500
37103+++ linux-2.6.32.48/drivers/scsi/bfa/bfa_iocfc.h 2011-11-15 19:59:43.000000000 -0500
37104@@ -61,7 +61,7 @@ struct bfa_hwif_s {
37105 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
37106 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
37107 u32 *nvecs, u32 *maxvec);
37108-};
37109+} __no_const;
37110 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
37111
37112 struct bfa_iocfc_s {
37113diff -urNp linux-2.6.32.48/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.48/drivers/scsi/bfa/bfa_ioc.h
37114--- linux-2.6.32.48/drivers/scsi/bfa/bfa_ioc.h 2011-11-08 19:02:43.000000000 -0500
37115+++ linux-2.6.32.48/drivers/scsi/bfa/bfa_ioc.h 2011-11-15 19:59:43.000000000 -0500
37116@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
37117 bfa_ioc_disable_cbfn_t disable_cbfn;
37118 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
37119 bfa_ioc_reset_cbfn_t reset_cbfn;
37120-};
37121+} __no_const;
37122
37123 /**
37124 * Heartbeat failure notification queue element.
37125diff -urNp linux-2.6.32.48/drivers/scsi/BusLogic.c linux-2.6.32.48/drivers/scsi/BusLogic.c
37126--- linux-2.6.32.48/drivers/scsi/BusLogic.c 2011-11-08 19:02:43.000000000 -0500
37127+++ linux-2.6.32.48/drivers/scsi/BusLogic.c 2011-11-15 19:59:43.000000000 -0500
37128@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
37129 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
37130 *PrototypeHostAdapter)
37131 {
37132+ pax_track_stack();
37133+
37134 /*
37135 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
37136 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
37137diff -urNp linux-2.6.32.48/drivers/scsi/dpt_i2o.c linux-2.6.32.48/drivers/scsi/dpt_i2o.c
37138--- linux-2.6.32.48/drivers/scsi/dpt_i2o.c 2011-11-08 19:02:43.000000000 -0500
37139+++ linux-2.6.32.48/drivers/scsi/dpt_i2o.c 2011-11-15 19:59:43.000000000 -0500
37140@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
37141 dma_addr_t addr;
37142 ulong flags = 0;
37143
37144+ pax_track_stack();
37145+
37146 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
37147 // get user msg size in u32s
37148 if(get_user(size, &user_msg[0])){
37149@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
37150 s32 rcode;
37151 dma_addr_t addr;
37152
37153+ pax_track_stack();
37154+
37155 memset(msg, 0 , sizeof(msg));
37156 len = scsi_bufflen(cmd);
37157 direction = 0x00000000;
37158diff -urNp linux-2.6.32.48/drivers/scsi/eata.c linux-2.6.32.48/drivers/scsi/eata.c
37159--- linux-2.6.32.48/drivers/scsi/eata.c 2011-11-08 19:02:43.000000000 -0500
37160+++ linux-2.6.32.48/drivers/scsi/eata.c 2011-11-15 19:59:43.000000000 -0500
37161@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
37162 struct hostdata *ha;
37163 char name[16];
37164
37165+ pax_track_stack();
37166+
37167 sprintf(name, "%s%d", driver_name, j);
37168
37169 if (!request_region(port_base, REGION_SIZE, driver_name)) {
37170diff -urNp linux-2.6.32.48/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.48/drivers/scsi/fcoe/libfcoe.c
37171--- linux-2.6.32.48/drivers/scsi/fcoe/libfcoe.c 2011-11-08 19:02:43.000000000 -0500
37172+++ linux-2.6.32.48/drivers/scsi/fcoe/libfcoe.c 2011-11-15 19:59:43.000000000 -0500
37173@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
37174 size_t rlen;
37175 size_t dlen;
37176
37177+ pax_track_stack();
37178+
37179 fiph = (struct fip_header *)skb->data;
37180 sub = fiph->fip_subcode;
37181 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
37182diff -urNp linux-2.6.32.48/drivers/scsi/fnic/fnic_main.c linux-2.6.32.48/drivers/scsi/fnic/fnic_main.c
37183--- linux-2.6.32.48/drivers/scsi/fnic/fnic_main.c 2011-11-08 19:02:43.000000000 -0500
37184+++ linux-2.6.32.48/drivers/scsi/fnic/fnic_main.c 2011-11-15 19:59:43.000000000 -0500
37185@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
37186 /* Start local port initiatialization */
37187
37188 lp->link_up = 0;
37189- lp->tt = fnic_transport_template;
37190+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
37191
37192 lp->max_retry_count = fnic->config.flogi_retries;
37193 lp->max_rport_retry_count = fnic->config.plogi_retries;
37194diff -urNp linux-2.6.32.48/drivers/scsi/gdth.c linux-2.6.32.48/drivers/scsi/gdth.c
37195--- linux-2.6.32.48/drivers/scsi/gdth.c 2011-11-08 19:02:43.000000000 -0500
37196+++ linux-2.6.32.48/drivers/scsi/gdth.c 2011-11-15 19:59:43.000000000 -0500
37197@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
37198 ulong flags;
37199 gdth_ha_str *ha;
37200
37201+ pax_track_stack();
37202+
37203 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
37204 return -EFAULT;
37205 ha = gdth_find_ha(ldrv.ionode);
37206@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
37207 gdth_ha_str *ha;
37208 int rval;
37209
37210+ pax_track_stack();
37211+
37212 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
37213 res.number >= MAX_HDRIVES)
37214 return -EFAULT;
37215@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
37216 gdth_ha_str *ha;
37217 int rval;
37218
37219+ pax_track_stack();
37220+
37221 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
37222 return -EFAULT;
37223 ha = gdth_find_ha(gen.ionode);
37224@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
37225 int i;
37226 gdth_cmd_str gdtcmd;
37227 char cmnd[MAX_COMMAND_SIZE];
37228+
37229+ pax_track_stack();
37230+
37231 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
37232
37233 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
37234diff -urNp linux-2.6.32.48/drivers/scsi/gdth_proc.c linux-2.6.32.48/drivers/scsi/gdth_proc.c
37235--- linux-2.6.32.48/drivers/scsi/gdth_proc.c 2011-11-08 19:02:43.000000000 -0500
37236+++ linux-2.6.32.48/drivers/scsi/gdth_proc.c 2011-11-15 19:59:43.000000000 -0500
37237@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
37238 ulong64 paddr;
37239
37240 char cmnd[MAX_COMMAND_SIZE];
37241+
37242+ pax_track_stack();
37243+
37244 memset(cmnd, 0xff, 12);
37245 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
37246
37247@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
37248 gdth_hget_str *phg;
37249 char cmnd[MAX_COMMAND_SIZE];
37250
37251+ pax_track_stack();
37252+
37253 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
37254 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
37255 if (!gdtcmd || !estr)
37256diff -urNp linux-2.6.32.48/drivers/scsi/hosts.c linux-2.6.32.48/drivers/scsi/hosts.c
37257--- linux-2.6.32.48/drivers/scsi/hosts.c 2011-11-08 19:02:43.000000000 -0500
37258+++ linux-2.6.32.48/drivers/scsi/hosts.c 2011-11-15 19:59:43.000000000 -0500
37259@@ -40,7 +40,7 @@
37260 #include "scsi_logging.h"
37261
37262
37263-static atomic_t scsi_host_next_hn; /* host_no for next new host */
37264+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
37265
37266
37267 static void scsi_host_cls_release(struct device *dev)
37268@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
37269 * subtract one because we increment first then return, but we need to
37270 * know what the next host number was before increment
37271 */
37272- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
37273+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
37274 shost->dma_channel = 0xff;
37275
37276 /* These three are default values which can be overridden */
37277diff -urNp linux-2.6.32.48/drivers/scsi/ipr.c linux-2.6.32.48/drivers/scsi/ipr.c
37278--- linux-2.6.32.48/drivers/scsi/ipr.c 2011-11-08 19:02:43.000000000 -0500
37279+++ linux-2.6.32.48/drivers/scsi/ipr.c 2011-11-15 19:59:43.000000000 -0500
37280@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
37281 return true;
37282 }
37283
37284-static struct ata_port_operations ipr_sata_ops = {
37285+static const struct ata_port_operations ipr_sata_ops = {
37286 .phy_reset = ipr_ata_phy_reset,
37287 .hardreset = ipr_sata_reset,
37288 .post_internal_cmd = ipr_ata_post_internal,
37289diff -urNp linux-2.6.32.48/drivers/scsi/ips.h linux-2.6.32.48/drivers/scsi/ips.h
37290--- linux-2.6.32.48/drivers/scsi/ips.h 2011-11-08 19:02:43.000000000 -0500
37291+++ linux-2.6.32.48/drivers/scsi/ips.h 2011-11-15 19:59:43.000000000 -0500
37292@@ -1027,7 +1027,7 @@ typedef struct {
37293 int (*intr)(struct ips_ha *);
37294 void (*enableint)(struct ips_ha *);
37295 uint32_t (*statupd)(struct ips_ha *);
37296-} ips_hw_func_t;
37297+} __no_const ips_hw_func_t;
37298
37299 typedef struct ips_ha {
37300 uint8_t ha_id[IPS_MAX_CHANNELS+1];
37301diff -urNp linux-2.6.32.48/drivers/scsi/libfc/fc_exch.c linux-2.6.32.48/drivers/scsi/libfc/fc_exch.c
37302--- linux-2.6.32.48/drivers/scsi/libfc/fc_exch.c 2011-11-08 19:02:43.000000000 -0500
37303+++ linux-2.6.32.48/drivers/scsi/libfc/fc_exch.c 2011-11-15 19:59:43.000000000 -0500
37304@@ -86,12 +86,12 @@ struct fc_exch_mgr {
37305 * all together if not used XXX
37306 */
37307 struct {
37308- atomic_t no_free_exch;
37309- atomic_t no_free_exch_xid;
37310- atomic_t xid_not_found;
37311- atomic_t xid_busy;
37312- atomic_t seq_not_found;
37313- atomic_t non_bls_resp;
37314+ atomic_unchecked_t no_free_exch;
37315+ atomic_unchecked_t no_free_exch_xid;
37316+ atomic_unchecked_t xid_not_found;
37317+ atomic_unchecked_t xid_busy;
37318+ atomic_unchecked_t seq_not_found;
37319+ atomic_unchecked_t non_bls_resp;
37320 } stats;
37321 };
37322 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
37323@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
37324 /* allocate memory for exchange */
37325 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37326 if (!ep) {
37327- atomic_inc(&mp->stats.no_free_exch);
37328+ atomic_inc_unchecked(&mp->stats.no_free_exch);
37329 goto out;
37330 }
37331 memset(ep, 0, sizeof(*ep));
37332@@ -557,7 +557,7 @@ out:
37333 return ep;
37334 err:
37335 spin_unlock_bh(&pool->lock);
37336- atomic_inc(&mp->stats.no_free_exch_xid);
37337+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37338 mempool_free(ep, mp->ep_pool);
37339 return NULL;
37340 }
37341@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37342 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37343 ep = fc_exch_find(mp, xid);
37344 if (!ep) {
37345- atomic_inc(&mp->stats.xid_not_found);
37346+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37347 reject = FC_RJT_OX_ID;
37348 goto out;
37349 }
37350@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37351 ep = fc_exch_find(mp, xid);
37352 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37353 if (ep) {
37354- atomic_inc(&mp->stats.xid_busy);
37355+ atomic_inc_unchecked(&mp->stats.xid_busy);
37356 reject = FC_RJT_RX_ID;
37357 goto rel;
37358 }
37359@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37360 }
37361 xid = ep->xid; /* get our XID */
37362 } else if (!ep) {
37363- atomic_inc(&mp->stats.xid_not_found);
37364+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37365 reject = FC_RJT_RX_ID; /* XID not found */
37366 goto out;
37367 }
37368@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37369 } else {
37370 sp = &ep->seq;
37371 if (sp->id != fh->fh_seq_id) {
37372- atomic_inc(&mp->stats.seq_not_found);
37373+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37374 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
37375 goto rel;
37376 }
37377@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
37378
37379 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37380 if (!ep) {
37381- atomic_inc(&mp->stats.xid_not_found);
37382+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37383 goto out;
37384 }
37385 if (ep->esb_stat & ESB_ST_COMPLETE) {
37386- atomic_inc(&mp->stats.xid_not_found);
37387+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37388 goto out;
37389 }
37390 if (ep->rxid == FC_XID_UNKNOWN)
37391 ep->rxid = ntohs(fh->fh_rx_id);
37392 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37393- atomic_inc(&mp->stats.xid_not_found);
37394+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37395 goto rel;
37396 }
37397 if (ep->did != ntoh24(fh->fh_s_id) &&
37398 ep->did != FC_FID_FLOGI) {
37399- atomic_inc(&mp->stats.xid_not_found);
37400+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37401 goto rel;
37402 }
37403 sof = fr_sof(fp);
37404@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
37405 } else {
37406 sp = &ep->seq;
37407 if (sp->id != fh->fh_seq_id) {
37408- atomic_inc(&mp->stats.seq_not_found);
37409+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37410 goto rel;
37411 }
37412 }
37413@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
37414 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37415
37416 if (!sp)
37417- atomic_inc(&mp->stats.xid_not_found);
37418+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37419 else
37420- atomic_inc(&mp->stats.non_bls_resp);
37421+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
37422
37423 fc_frame_free(fp);
37424 }
37425diff -urNp linux-2.6.32.48/drivers/scsi/libsas/sas_ata.c linux-2.6.32.48/drivers/scsi/libsas/sas_ata.c
37426--- linux-2.6.32.48/drivers/scsi/libsas/sas_ata.c 2011-11-08 19:02:43.000000000 -0500
37427+++ linux-2.6.32.48/drivers/scsi/libsas/sas_ata.c 2011-11-15 19:59:43.000000000 -0500
37428@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
37429 }
37430 }
37431
37432-static struct ata_port_operations sas_sata_ops = {
37433+static const struct ata_port_operations sas_sata_ops = {
37434 .phy_reset = sas_ata_phy_reset,
37435 .post_internal_cmd = sas_ata_post_internal,
37436 .qc_defer = ata_std_qc_defer,
37437diff -urNp linux-2.6.32.48/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.48/drivers/scsi/lpfc/lpfc_debugfs.c
37438--- linux-2.6.32.48/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-08 19:02:43.000000000 -0500
37439+++ linux-2.6.32.48/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-15 19:59:43.000000000 -0500
37440@@ -124,7 +124,7 @@ struct lpfc_debug {
37441 int len;
37442 };
37443
37444-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37445+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37446 static unsigned long lpfc_debugfs_start_time = 0L;
37447
37448 /**
37449@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
37450 lpfc_debugfs_enable = 0;
37451
37452 len = 0;
37453- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37454+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37455 (lpfc_debugfs_max_disc_trc - 1);
37456 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37457 dtp = vport->disc_trc + i;
37458@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
37459 lpfc_debugfs_enable = 0;
37460
37461 len = 0;
37462- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37463+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37464 (lpfc_debugfs_max_slow_ring_trc - 1);
37465 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37466 dtp = phba->slow_ring_trc + i;
37467@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
37468 uint32_t *ptr;
37469 char buffer[1024];
37470
37471+ pax_track_stack();
37472+
37473 off = 0;
37474 spin_lock_irq(&phba->hbalock);
37475
37476@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
37477 !vport || !vport->disc_trc)
37478 return;
37479
37480- index = atomic_inc_return(&vport->disc_trc_cnt) &
37481+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37482 (lpfc_debugfs_max_disc_trc - 1);
37483 dtp = vport->disc_trc + index;
37484 dtp->fmt = fmt;
37485 dtp->data1 = data1;
37486 dtp->data2 = data2;
37487 dtp->data3 = data3;
37488- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37489+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37490 dtp->jif = jiffies;
37491 #endif
37492 return;
37493@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
37494 !phba || !phba->slow_ring_trc)
37495 return;
37496
37497- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37498+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37499 (lpfc_debugfs_max_slow_ring_trc - 1);
37500 dtp = phba->slow_ring_trc + index;
37501 dtp->fmt = fmt;
37502 dtp->data1 = data1;
37503 dtp->data2 = data2;
37504 dtp->data3 = data3;
37505- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37506+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37507 dtp->jif = jiffies;
37508 #endif
37509 return;
37510@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37511 "slow_ring buffer\n");
37512 goto debug_failed;
37513 }
37514- atomic_set(&phba->slow_ring_trc_cnt, 0);
37515+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37516 memset(phba->slow_ring_trc, 0,
37517 (sizeof(struct lpfc_debugfs_trc) *
37518 lpfc_debugfs_max_slow_ring_trc));
37519@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37520 "buffer\n");
37521 goto debug_failed;
37522 }
37523- atomic_set(&vport->disc_trc_cnt, 0);
37524+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37525
37526 snprintf(name, sizeof(name), "discovery_trace");
37527 vport->debug_disc_trc =
37528diff -urNp linux-2.6.32.48/drivers/scsi/lpfc/lpfc.h linux-2.6.32.48/drivers/scsi/lpfc/lpfc.h
37529--- linux-2.6.32.48/drivers/scsi/lpfc/lpfc.h 2011-11-08 19:02:43.000000000 -0500
37530+++ linux-2.6.32.48/drivers/scsi/lpfc/lpfc.h 2011-11-15 19:59:43.000000000 -0500
37531@@ -400,7 +400,7 @@ struct lpfc_vport {
37532 struct dentry *debug_nodelist;
37533 struct dentry *vport_debugfs_root;
37534 struct lpfc_debugfs_trc *disc_trc;
37535- atomic_t disc_trc_cnt;
37536+ atomic_unchecked_t disc_trc_cnt;
37537 #endif
37538 uint8_t stat_data_enabled;
37539 uint8_t stat_data_blocked;
37540@@ -725,8 +725,8 @@ struct lpfc_hba {
37541 struct timer_list fabric_block_timer;
37542 unsigned long bit_flags;
37543 #define FABRIC_COMANDS_BLOCKED 0
37544- atomic_t num_rsrc_err;
37545- atomic_t num_cmd_success;
37546+ atomic_unchecked_t num_rsrc_err;
37547+ atomic_unchecked_t num_cmd_success;
37548 unsigned long last_rsrc_error_time;
37549 unsigned long last_ramp_down_time;
37550 unsigned long last_ramp_up_time;
37551@@ -740,7 +740,7 @@ struct lpfc_hba {
37552 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
37553 struct dentry *debug_slow_ring_trc;
37554 struct lpfc_debugfs_trc *slow_ring_trc;
37555- atomic_t slow_ring_trc_cnt;
37556+ atomic_unchecked_t slow_ring_trc_cnt;
37557 #endif
37558
37559 /* Used for deferred freeing of ELS data buffers */
37560diff -urNp linux-2.6.32.48/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.48/drivers/scsi/lpfc/lpfc_init.c
37561--- linux-2.6.32.48/drivers/scsi/lpfc/lpfc_init.c 2011-11-08 19:02:43.000000000 -0500
37562+++ linux-2.6.32.48/drivers/scsi/lpfc/lpfc_init.c 2011-11-15 19:59:43.000000000 -0500
37563@@ -8021,8 +8021,10 @@ lpfc_init(void)
37564 printk(LPFC_COPYRIGHT "\n");
37565
37566 if (lpfc_enable_npiv) {
37567- lpfc_transport_functions.vport_create = lpfc_vport_create;
37568- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37569+ pax_open_kernel();
37570+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37571+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37572+ pax_close_kernel();
37573 }
37574 lpfc_transport_template =
37575 fc_attach_transport(&lpfc_transport_functions);
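The lpfc_init.c hunk shows how an already-constified template still receives its optional callbacks: the store goes through a *(void **)& cast inside a pax_open_kernel()/pax_close_kernel() window, because a plain assignment to the now read-only lpfc_transport_functions fields would be rejected by the compiler and fault at run time. Sketch of the pattern with an illustrative struct:

    /* Patch one slot of a read-only template during init (names illustrative). */
    struct xport_template {
        int (*vport_create)(void *vport);
    };

    static const struct xport_template tmpl;

    static int my_vport_create(void *vport) { return 0; }

    static void enable_npiv_support(void)
    {
        pax_open_kernel();
        *(void **)&tmpl.vport_create = my_vport_create;
        pax_close_kernel();
    }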
37576diff -urNp linux-2.6.32.48/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.48/drivers/scsi/lpfc/lpfc_scsi.c
37577--- linux-2.6.32.48/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-08 19:02:43.000000000 -0500
37578+++ linux-2.6.32.48/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-15 19:59:43.000000000 -0500
37579@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
37580 uint32_t evt_posted;
37581
37582 spin_lock_irqsave(&phba->hbalock, flags);
37583- atomic_inc(&phba->num_rsrc_err);
37584+ atomic_inc_unchecked(&phba->num_rsrc_err);
37585 phba->last_rsrc_error_time = jiffies;
37586
37587 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37588@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37589 unsigned long flags;
37590 struct lpfc_hba *phba = vport->phba;
37591 uint32_t evt_posted;
37592- atomic_inc(&phba->num_cmd_success);
37593+ atomic_inc_unchecked(&phba->num_cmd_success);
37594
37595 if (vport->cfg_lun_queue_depth <= queue_depth)
37596 return;
37597@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37598 int i;
37599 struct lpfc_rport_data *rdata;
37600
37601- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37602- num_cmd_success = atomic_read(&phba->num_cmd_success);
37603+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37604+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37605
37606 vports = lpfc_create_vport_work_array(phba);
37607 if (vports != NULL)
37608@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37609 }
37610 }
37611 lpfc_destroy_vport_work_array(phba, vports);
37612- atomic_set(&phba->num_rsrc_err, 0);
37613- atomic_set(&phba->num_cmd_success, 0);
37614+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37615+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37616 }
37617
37618 /**
37619@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37620 }
37621 }
37622 lpfc_destroy_vport_work_array(phba, vports);
37623- atomic_set(&phba->num_rsrc_err, 0);
37624- atomic_set(&phba->num_cmd_success, 0);
37625+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37626+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37627 }
37628
37629 /**
37630diff -urNp linux-2.6.32.48/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.48/drivers/scsi/megaraid/megaraid_mbox.c
37631--- linux-2.6.32.48/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-08 19:02:43.000000000 -0500
37632+++ linux-2.6.32.48/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-15 19:59:43.000000000 -0500
37633@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37634 int rval;
37635 int i;
37636
37637+ pax_track_stack();
37638+
37639 // Allocate memory for the base list of scb for management module.
37640 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37641
37642diff -urNp linux-2.6.32.48/drivers/scsi/osd/osd_initiator.c linux-2.6.32.48/drivers/scsi/osd/osd_initiator.c
37643--- linux-2.6.32.48/drivers/scsi/osd/osd_initiator.c 2011-11-08 19:02:43.000000000 -0500
37644+++ linux-2.6.32.48/drivers/scsi/osd/osd_initiator.c 2011-11-15 19:59:43.000000000 -0500
37645@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37646 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37647 int ret;
37648
37649+ pax_track_stack();
37650+
37651 or = osd_start_request(od, GFP_KERNEL);
37652 if (!or)
37653 return -ENOMEM;
37654diff -urNp linux-2.6.32.48/drivers/scsi/pmcraid.c linux-2.6.32.48/drivers/scsi/pmcraid.c
37655--- linux-2.6.32.48/drivers/scsi/pmcraid.c 2011-11-08 19:02:43.000000000 -0500
37656+++ linux-2.6.32.48/drivers/scsi/pmcraid.c 2011-11-15 19:59:43.000000000 -0500
37657@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37658 res->scsi_dev = scsi_dev;
37659 scsi_dev->hostdata = res;
37660 res->change_detected = 0;
37661- atomic_set(&res->read_failures, 0);
37662- atomic_set(&res->write_failures, 0);
37663+ atomic_set_unchecked(&res->read_failures, 0);
37664+ atomic_set_unchecked(&res->write_failures, 0);
37665 rc = 0;
37666 }
37667 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37668@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37669
37670 /* If this was a SCSI read/write command keep count of errors */
37671 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37672- atomic_inc(&res->read_failures);
37673+ atomic_inc_unchecked(&res->read_failures);
37674 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37675- atomic_inc(&res->write_failures);
37676+ atomic_inc_unchecked(&res->write_failures);
37677
37678 if (!RES_IS_GSCSI(res->cfg_entry) &&
37679 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37680@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37681
37682 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37683 /* add resources only after host is added into system */
37684- if (!atomic_read(&pinstance->expose_resources))
37685+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37686 return;
37687
37688 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37689@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37690 init_waitqueue_head(&pinstance->reset_wait_q);
37691
37692 atomic_set(&pinstance->outstanding_cmds, 0);
37693- atomic_set(&pinstance->expose_resources, 0);
37694+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37695
37696 INIT_LIST_HEAD(&pinstance->free_res_q);
37697 INIT_LIST_HEAD(&pinstance->used_res_q);
37698@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37699 /* Schedule worker thread to handle CCN and take care of adding and
37700 * removing devices to OS
37701 */
37702- atomic_set(&pinstance->expose_resources, 1);
37703+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37704 schedule_work(&pinstance->worker_q);
37705 return rc;
37706
37707diff -urNp linux-2.6.32.48/drivers/scsi/pmcraid.h linux-2.6.32.48/drivers/scsi/pmcraid.h
37708--- linux-2.6.32.48/drivers/scsi/pmcraid.h 2011-11-08 19:02:43.000000000 -0500
37709+++ linux-2.6.32.48/drivers/scsi/pmcraid.h 2011-11-15 19:59:43.000000000 -0500
37710@@ -690,7 +690,7 @@ struct pmcraid_instance {
37711 atomic_t outstanding_cmds;
37712
37713 /* should add/delete resources to mid-layer now ?*/
37714- atomic_t expose_resources;
37715+ atomic_unchecked_t expose_resources;
37716
37717 /* Tasklet to handle deferred processing */
37718 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37719@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37720 struct list_head queue; /* link to "to be exposed" resources */
37721 struct pmcraid_config_table_entry cfg_entry;
37722 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37723- atomic_t read_failures; /* count of failed READ commands */
37724- atomic_t write_failures; /* count of failed WRITE commands */
37725+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37726+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37727
37728 /* To indicate add/delete/modify during CCN */
37729 u8 change_detected;
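/*
 * The pmcraid hunks above move statistics-only values (the read/write
 * failure tallies and the expose_resources flag) from atomic_t to
 * atomic_unchecked_t.  Under PAX_REFCOUNT the ordinary atomic_inc()
 * traps on signed overflow to catch reference-count bugs; counters that
 * may legitimately wrap are switched to the *_unchecked variants so a
 * busy counter cannot trip that check.  A rough sketch of the split,
 * with simplified definitions (the real ones are per-architecture):
 */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* no overflow trap */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return *(volatile const int *)&v->counter;
}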
37730diff -urNp linux-2.6.32.48/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.48/drivers/scsi/qla2xxx/qla_def.h
37731--- linux-2.6.32.48/drivers/scsi/qla2xxx/qla_def.h 2011-11-08 19:02:43.000000000 -0500
37732+++ linux-2.6.32.48/drivers/scsi/qla2xxx/qla_def.h 2011-11-15 19:59:43.000000000 -0500
37733@@ -2089,7 +2089,7 @@ struct isp_operations {
37734
37735 int (*get_flash_version) (struct scsi_qla_host *, void *);
37736 int (*start_scsi) (srb_t *);
37737-};
37738+} __no_const;
37739
37740 /* MSI-X Support *************************************************************/
37741
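/*
 * isp_operations above gains a __no_const annotation.  grsecurity's
 * "constify" GCC plugin treats structures made up only of function
 * pointers as implicitly const; this one is filled in at runtime by the
 * qla2xxx driver, so it opts out.  A minimal sketch, assuming a build
 * without the plugin where the attribute expands to nothing:
 */
#ifndef __no_const
#define __no_const      /* plugin builds map this to a real attribute */
#endif

struct example_ops {
        int (*start)(void *ctx);
        int (*stop)(void *ctx);
} __no_const;           /* members may still be assigned after init */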
37742diff -urNp linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_def.h
37743--- linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_def.h 2011-11-08 19:02:43.000000000 -0500
37744+++ linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_def.h 2011-11-15 19:59:43.000000000 -0500
37745@@ -240,7 +240,7 @@ struct ddb_entry {
37746 atomic_t retry_relogin_timer; /* Min Time between relogins
37747 * (4000 only) */
37748 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37749- atomic_t relogin_retry_count; /* Num of times relogin has been
37750+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37751 * retried */
37752
37753 uint16_t port;
37754diff -urNp linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_init.c
37755--- linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_init.c 2011-11-08 19:02:43.000000000 -0500
37756+++ linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_init.c 2011-11-15 19:59:43.000000000 -0500
37757@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37758 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37759 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37760 atomic_set(&ddb_entry->relogin_timer, 0);
37761- atomic_set(&ddb_entry->relogin_retry_count, 0);
37762+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37763 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37764 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37765 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37766@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37767 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37768 atomic_set(&ddb_entry->port_down_timer,
37769 ha->port_down_retry_count);
37770- atomic_set(&ddb_entry->relogin_retry_count, 0);
37771+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37772 atomic_set(&ddb_entry->relogin_timer, 0);
37773 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37774 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37775diff -urNp linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_os.c
37776--- linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_os.c 2011-11-08 19:02:43.000000000 -0500
37777+++ linux-2.6.32.48/drivers/scsi/qla4xxx/ql4_os.c 2011-11-15 19:59:43.000000000 -0500
37778@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37779 ddb_entry->fw_ddb_device_state ==
37780 DDB_DS_SESSION_FAILED) {
37781 /* Reset retry relogin timer */
37782- atomic_inc(&ddb_entry->relogin_retry_count);
37783+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37784 DEBUG2(printk("scsi%ld: index[%d] relogin"
37785 " timed out-retrying"
37786 " relogin (%d)\n",
37787 ha->host_no,
37788 ddb_entry->fw_ddb_index,
37789- atomic_read(&ddb_entry->
37790+ atomic_read_unchecked(&ddb_entry->
37791 relogin_retry_count))
37792 );
37793 start_dpc++;
37794diff -urNp linux-2.6.32.48/drivers/scsi/scsi.c linux-2.6.32.48/drivers/scsi/scsi.c
37795--- linux-2.6.32.48/drivers/scsi/scsi.c 2011-11-08 19:02:43.000000000 -0500
37796+++ linux-2.6.32.48/drivers/scsi/scsi.c 2011-11-15 19:59:43.000000000 -0500
37797@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37798 unsigned long timeout;
37799 int rtn = 0;
37800
37801- atomic_inc(&cmd->device->iorequest_cnt);
37802+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37803
37804 /* check if the device is still usable */
37805 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37806diff -urNp linux-2.6.32.48/drivers/scsi/scsi_debug.c linux-2.6.32.48/drivers/scsi/scsi_debug.c
37807--- linux-2.6.32.48/drivers/scsi/scsi_debug.c 2011-11-08 19:02:43.000000000 -0500
37808+++ linux-2.6.32.48/drivers/scsi/scsi_debug.c 2011-11-15 19:59:43.000000000 -0500
37809@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37810 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37811 unsigned char *cmd = (unsigned char *)scp->cmnd;
37812
37813+ pax_track_stack();
37814+
37815 if ((errsts = check_readiness(scp, 1, devip)))
37816 return errsts;
37817 memset(arr, 0, sizeof(arr));
37818@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37819 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37820 unsigned char *cmd = (unsigned char *)scp->cmnd;
37821
37822+ pax_track_stack();
37823+
37824 if ((errsts = check_readiness(scp, 1, devip)))
37825 return errsts;
37826 memset(arr, 0, sizeof(arr));
37827diff -urNp linux-2.6.32.48/drivers/scsi/scsi_lib.c linux-2.6.32.48/drivers/scsi/scsi_lib.c
37828--- linux-2.6.32.48/drivers/scsi/scsi_lib.c 2011-11-08 19:02:43.000000000 -0500
37829+++ linux-2.6.32.48/drivers/scsi/scsi_lib.c 2011-11-15 19:59:43.000000000 -0500
37830@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37831
37832 scsi_init_cmd_errh(cmd);
37833 cmd->result = DID_NO_CONNECT << 16;
37834- atomic_inc(&cmd->device->iorequest_cnt);
37835+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37836
37837 /*
37838 * SCSI request completion path will do scsi_device_unbusy(),
37839@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37840 */
37841 cmd->serial_number = 0;
37842
37843- atomic_inc(&cmd->device->iodone_cnt);
37844+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37845 if (cmd->result)
37846- atomic_inc(&cmd->device->ioerr_cnt);
37847+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37848
37849 disposition = scsi_decide_disposition(cmd);
37850 if (disposition != SUCCESS &&
37851diff -urNp linux-2.6.32.48/drivers/scsi/scsi_sysfs.c linux-2.6.32.48/drivers/scsi/scsi_sysfs.c
37852--- linux-2.6.32.48/drivers/scsi/scsi_sysfs.c 2011-11-08 19:02:43.000000000 -0500
37853+++ linux-2.6.32.48/drivers/scsi/scsi_sysfs.c 2011-11-15 19:59:43.000000000 -0500
37854@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37855 char *buf) \
37856 { \
37857 struct scsi_device *sdev = to_scsi_device(dev); \
37858- unsigned long long count = atomic_read(&sdev->field); \
37859+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37860 return snprintf(buf, 20, "0x%llx\n", count); \
37861 } \
37862 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37863diff -urNp linux-2.6.32.48/drivers/scsi/scsi_tgt_lib.c linux-2.6.32.48/drivers/scsi/scsi_tgt_lib.c
37864--- linux-2.6.32.48/drivers/scsi/scsi_tgt_lib.c 2011-11-08 19:02:43.000000000 -0500
37865+++ linux-2.6.32.48/drivers/scsi/scsi_tgt_lib.c 2011-11-15 19:59:43.000000000 -0500
37866@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
37867 int err;
37868
37869 dprintk("%lx %u\n", uaddr, len);
37870- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37871+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37872 if (err) {
37873 /*
37874 * TODO: need to fixup sg_tablesize, max_segment_size,
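/*
 * The scsi_tgt_lib hunk above only changes the cast to (void __user *):
 * blk_rq_map_user() takes a user-space pointer, and __user is a sparse
 * address-space annotation with no effect on generated code.  Its usual
 * definition, for reference:
 */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

static inline void takes_user_pointer(void __user *ubuf)
{
        (void)ubuf;     /* sparse warns if this is dereferenced directly */
}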
37875diff -urNp linux-2.6.32.48/drivers/scsi/scsi_transport_fc.c linux-2.6.32.48/drivers/scsi/scsi_transport_fc.c
37876--- linux-2.6.32.48/drivers/scsi/scsi_transport_fc.c 2011-11-08 19:02:43.000000000 -0500
37877+++ linux-2.6.32.48/drivers/scsi/scsi_transport_fc.c 2011-11-15 19:59:43.000000000 -0500
37878@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37879 * Netlink Infrastructure
37880 */
37881
37882-static atomic_t fc_event_seq;
37883+static atomic_unchecked_t fc_event_seq;
37884
37885 /**
37886 * fc_get_event_number - Obtain the next sequential FC event number
37887@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37888 u32
37889 fc_get_event_number(void)
37890 {
37891- return atomic_add_return(1, &fc_event_seq);
37892+ return atomic_add_return_unchecked(1, &fc_event_seq);
37893 }
37894 EXPORT_SYMBOL(fc_get_event_number);
37895
37896@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37897 {
37898 int error;
37899
37900- atomic_set(&fc_event_seq, 0);
37901+ atomic_set_unchecked(&fc_event_seq, 0);
37902
37903 error = transport_class_register(&fc_host_class);
37904 if (error)
37905diff -urNp linux-2.6.32.48/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.48/drivers/scsi/scsi_transport_iscsi.c
37906--- linux-2.6.32.48/drivers/scsi/scsi_transport_iscsi.c 2011-11-08 19:02:43.000000000 -0500
37907+++ linux-2.6.32.48/drivers/scsi/scsi_transport_iscsi.c 2011-11-15 19:59:43.000000000 -0500
37908@@ -81,7 +81,7 @@ struct iscsi_internal {
37909 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37910 };
37911
37912-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37913+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37914 static struct workqueue_struct *iscsi_eh_timer_workq;
37915
37916 /*
37917@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37918 int err;
37919
37920 ihost = shost->shost_data;
37921- session->sid = atomic_add_return(1, &iscsi_session_nr);
37922+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37923
37924 if (id == ISCSI_MAX_TARGET) {
37925 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37926@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37927 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37928 ISCSI_TRANSPORT_VERSION);
37929
37930- atomic_set(&iscsi_session_nr, 0);
37931+ atomic_set_unchecked(&iscsi_session_nr, 0);
37932
37933 err = class_register(&iscsi_transport_class);
37934 if (err)
37935diff -urNp linux-2.6.32.48/drivers/scsi/scsi_transport_srp.c linux-2.6.32.48/drivers/scsi/scsi_transport_srp.c
37936--- linux-2.6.32.48/drivers/scsi/scsi_transport_srp.c 2011-11-08 19:02:43.000000000 -0500
37937+++ linux-2.6.32.48/drivers/scsi/scsi_transport_srp.c 2011-11-15 19:59:43.000000000 -0500
37938@@ -33,7 +33,7 @@
37939 #include "scsi_transport_srp_internal.h"
37940
37941 struct srp_host_attrs {
37942- atomic_t next_port_id;
37943+ atomic_unchecked_t next_port_id;
37944 };
37945 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37946
37947@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37948 struct Scsi_Host *shost = dev_to_shost(dev);
37949 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37950
37951- atomic_set(&srp_host->next_port_id, 0);
37952+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37953 return 0;
37954 }
37955
37956@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37957 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37958 rport->roles = ids->roles;
37959
37960- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37961+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37962 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37963
37964 transport_setup_device(&rport->dev);
37965diff -urNp linux-2.6.32.48/drivers/scsi/sg.c linux-2.6.32.48/drivers/scsi/sg.c
37966--- linux-2.6.32.48/drivers/scsi/sg.c 2011-11-08 19:02:43.000000000 -0500
37967+++ linux-2.6.32.48/drivers/scsi/sg.c 2011-11-15 19:59:43.000000000 -0500
37968@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct fil
37969 sdp->disk->disk_name,
37970 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37971 NULL,
37972- (char *)arg);
37973+ (char __user *)arg);
37974 case BLKTRACESTART:
37975 return blk_trace_startstop(sdp->device->request_queue, 1);
37976 case BLKTRACESTOP:
37977@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37978 const struct file_operations * fops;
37979 };
37980
37981-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37982+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37983 {"allow_dio", &adio_fops},
37984 {"debug", &debug_fops},
37985 {"def_reserved_size", &dressz_fops},
37986@@ -2307,7 +2307,7 @@ sg_proc_init(void)
37987 {
37988 int k, mask;
37989 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37990- struct sg_proc_leaf * leaf;
37991+ const struct sg_proc_leaf * leaf;
37992
37993 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37994 if (!sg_proc_sgp)
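/*
 * sg_proc_leaf_arr is only ever read, so the hunk above constifies both
 * the table and the iterator; the data can then live in .rodata, where a
 * stray write faults instead of silently corrupting the proc setup.  The
 * same pattern in miniature:
 */
#include <stdio.h>

struct leaf { const char *name; int id; };

static const struct leaf leaves[] = {
        { "allow_dio", 0 },
        { "debug",     1 },
};

int main(void)
{
        const struct leaf *l;

        for (l = leaves; l < leaves + sizeof(leaves) / sizeof(leaves[0]); l++)
                printf("%s -> %d\n", l->name, l->id);
        return 0;
}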
37995diff -urNp linux-2.6.32.48/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.48/drivers/scsi/sym53c8xx_2/sym_glue.c
37996--- linux-2.6.32.48/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-08 19:02:43.000000000 -0500
37997+++ linux-2.6.32.48/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-15 19:59:43.000000000 -0500
37998@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37999 int do_iounmap = 0;
38000 int do_disable_device = 1;
38001
38002+ pax_track_stack();
38003+
38004 memset(&sym_dev, 0, sizeof(sym_dev));
38005 memset(&nvram, 0, sizeof(nvram));
38006 sym_dev.pdev = pdev;
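/*
 * sym2_probe(), like the scsi_debug and osd hunks earlier in this file,
 * keeps large structures on the stack, so the patch drops a
 * pax_track_stack() call at the top of the function.  With
 * PAX_MEMORY_STACKLEAK enabled this records the deep stack usage so it
 * can be sanitised on return to userland; without that option the call
 * is expected to compile away, roughly:
 */
#ifndef CONFIG_PAX_MEMORY_STACKLEAK
# define pax_track_stack() do { } while (0)
#endif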
38007diff -urNp linux-2.6.32.48/drivers/serial/kgdboc.c linux-2.6.32.48/drivers/serial/kgdboc.c
38008--- linux-2.6.32.48/drivers/serial/kgdboc.c 2011-11-08 19:02:43.000000000 -0500
38009+++ linux-2.6.32.48/drivers/serial/kgdboc.c 2011-11-15 19:59:43.000000000 -0500
38010@@ -18,7 +18,7 @@
38011
38012 #define MAX_CONFIG_LEN 40
38013
38014-static struct kgdb_io kgdboc_io_ops;
38015+static const struct kgdb_io kgdboc_io_ops;
38016
38017 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38018 static int configured = -1;
38019@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
38020 module_put(THIS_MODULE);
38021 }
38022
38023-static struct kgdb_io kgdboc_io_ops = {
38024+static const struct kgdb_io kgdboc_io_ops = {
38025 .name = "kgdboc",
38026 .read_char = kgdboc_get_char,
38027 .write_char = kgdboc_put_char,
38028diff -urNp linux-2.6.32.48/drivers/spi/spi.c linux-2.6.32.48/drivers/spi/spi.c
38029--- linux-2.6.32.48/drivers/spi/spi.c 2011-11-08 19:02:43.000000000 -0500
38030+++ linux-2.6.32.48/drivers/spi/spi.c 2011-11-15 19:59:43.000000000 -0500
38031@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
38032 EXPORT_SYMBOL_GPL(spi_sync);
38033
38034 /* portable code must never pass more than 32 bytes */
38035-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38036+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
38037
38038 static u8 *buf;
38039
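/*
 * SPI_BUFSIZ changes from max(32,...) to max(32U,...).  The kernel's
 * max() macro deliberately compares pointers to its two temporaries so
 * that operands of different types (here a plain signed 32 against
 * SMP_CACHE_BYTES, which may expand to an unsigned expression) trigger a
 * compiler warning; writing 32U keeps both sides unsigned.  A cut-down
 * version of that macro shows the trick:
 */
#define max(x, y) ({                            \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        (void)(&_max1 == &_max2);               \
        _max1 > _max2 ? _max1 : _max2; })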
38040diff -urNp linux-2.6.32.48/drivers/staging/android/binder.c linux-2.6.32.48/drivers/staging/android/binder.c
38041--- linux-2.6.32.48/drivers/staging/android/binder.c 2011-11-08 19:02:43.000000000 -0500
38042+++ linux-2.6.32.48/drivers/staging/android/binder.c 2011-11-15 19:59:43.000000000 -0500
38043@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
38044 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
38045 }
38046
38047-static struct vm_operations_struct binder_vm_ops = {
38048+static const struct vm_operations_struct binder_vm_ops = {
38049 .open = binder_vma_open,
38050 .close = binder_vma_close,
38051 };
38052diff -urNp linux-2.6.32.48/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.48/drivers/staging/b3dfg/b3dfg.c
38053--- linux-2.6.32.48/drivers/staging/b3dfg/b3dfg.c 2011-11-08 19:02:43.000000000 -0500
38054+++ linux-2.6.32.48/drivers/staging/b3dfg/b3dfg.c 2011-11-15 19:59:43.000000000 -0500
38055@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
38056 return VM_FAULT_NOPAGE;
38057 }
38058
38059-static struct vm_operations_struct b3dfg_vm_ops = {
38060+static const struct vm_operations_struct b3dfg_vm_ops = {
38061 .fault = b3dfg_vma_fault,
38062 };
38063
38064@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
38065 return r;
38066 }
38067
38068-static struct file_operations b3dfg_fops = {
38069+static const struct file_operations b3dfg_fops = {
38070 .owner = THIS_MODULE,
38071 .open = b3dfg_open,
38072 .release = b3dfg_release,
38073diff -urNp linux-2.6.32.48/drivers/staging/comedi/comedi_fops.c linux-2.6.32.48/drivers/staging/comedi/comedi_fops.c
38074--- linux-2.6.32.48/drivers/staging/comedi/comedi_fops.c 2011-11-08 19:02:43.000000000 -0500
38075+++ linux-2.6.32.48/drivers/staging/comedi/comedi_fops.c 2011-11-15 19:59:43.000000000 -0500
38076@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
38077 mutex_unlock(&dev->mutex);
38078 }
38079
38080-static struct vm_operations_struct comedi_vm_ops = {
38081+static const struct vm_operations_struct comedi_vm_ops = {
38082 .close = comedi_unmap,
38083 };
38084
38085diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.48/drivers/staging/dream/qdsp5/adsp_driver.c
38086--- linux-2.6.32.48/drivers/staging/dream/qdsp5/adsp_driver.c 2011-11-08 19:02:43.000000000 -0500
38087+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/adsp_driver.c 2011-11-15 19:59:43.000000000 -0500
38088@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
38089 static dev_t adsp_devno;
38090 static struct class *adsp_class;
38091
38092-static struct file_operations adsp_fops = {
38093+static const struct file_operations adsp_fops = {
38094 .owner = THIS_MODULE,
38095 .open = adsp_open,
38096 .unlocked_ioctl = adsp_ioctl,
38097diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_aac.c
38098--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_aac.c 2011-11-08 19:02:43.000000000 -0500
38099+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_aac.c 2011-11-15 19:59:43.000000000 -0500
38100@@ -1022,7 +1022,7 @@ done:
38101 return rc;
38102 }
38103
38104-static struct file_operations audio_aac_fops = {
38105+static const struct file_operations audio_aac_fops = {
38106 .owner = THIS_MODULE,
38107 .open = audio_open,
38108 .release = audio_release,
38109diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_amrnb.c
38110--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-11-08 19:02:43.000000000 -0500
38111+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-11-15 19:59:43.000000000 -0500
38112@@ -833,7 +833,7 @@ done:
38113 return rc;
38114 }
38115
38116-static struct file_operations audio_amrnb_fops = {
38117+static const struct file_operations audio_amrnb_fops = {
38118 .owner = THIS_MODULE,
38119 .open = audamrnb_open,
38120 .release = audamrnb_release,
38121diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_evrc.c
38122--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_evrc.c 2011-11-08 19:02:43.000000000 -0500
38123+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_evrc.c 2011-11-15 19:59:43.000000000 -0500
38124@@ -805,7 +805,7 @@ dma_fail:
38125 return rc;
38126 }
38127
38128-static struct file_operations audio_evrc_fops = {
38129+static const struct file_operations audio_evrc_fops = {
38130 .owner = THIS_MODULE,
38131 .open = audevrc_open,
38132 .release = audevrc_release,
38133diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_in.c
38134--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_in.c 2011-11-08 19:02:43.000000000 -0500
38135+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_in.c 2011-11-15 19:59:43.000000000 -0500
38136@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
38137 return 0;
38138 }
38139
38140-static struct file_operations audio_fops = {
38141+static const struct file_operations audio_fops = {
38142 .owner = THIS_MODULE,
38143 .open = audio_in_open,
38144 .release = audio_in_release,
38145@@ -922,7 +922,7 @@ static struct file_operations audio_fops
38146 .unlocked_ioctl = audio_in_ioctl,
38147 };
38148
38149-static struct file_operations audpre_fops = {
38150+static const struct file_operations audpre_fops = {
38151 .owner = THIS_MODULE,
38152 .open = audpre_open,
38153 .unlocked_ioctl = audpre_ioctl,
38154diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_mp3.c
38155--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_mp3.c 2011-11-08 19:02:43.000000000 -0500
38156+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_mp3.c 2011-11-15 19:59:43.000000000 -0500
38157@@ -941,7 +941,7 @@ done:
38158 return rc;
38159 }
38160
38161-static struct file_operations audio_mp3_fops = {
38162+static const struct file_operations audio_mp3_fops = {
38163 .owner = THIS_MODULE,
38164 .open = audio_open,
38165 .release = audio_release,
38166diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_out.c
38167--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_out.c 2011-11-08 19:02:43.000000000 -0500
38168+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_out.c 2011-11-15 19:59:43.000000000 -0500
38169@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
38170 return 0;
38171 }
38172
38173-static struct file_operations audio_fops = {
38174+static const struct file_operations audio_fops = {
38175 .owner = THIS_MODULE,
38176 .open = audio_open,
38177 .release = audio_release,
38178@@ -819,7 +819,7 @@ static struct file_operations audio_fops
38179 .unlocked_ioctl = audio_ioctl,
38180 };
38181
38182-static struct file_operations audpp_fops = {
38183+static const struct file_operations audpp_fops = {
38184 .owner = THIS_MODULE,
38185 .open = audpp_open,
38186 .unlocked_ioctl = audpp_ioctl,
38187diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_qcelp.c
38188--- linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-11-08 19:02:43.000000000 -0500
38189+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-11-15 19:59:43.000000000 -0500
38190@@ -816,7 +816,7 @@ err:
38191 return rc;
38192 }
38193
38194-static struct file_operations audio_qcelp_fops = {
38195+static const struct file_operations audio_qcelp_fops = {
38196 .owner = THIS_MODULE,
38197 .open = audqcelp_open,
38198 .release = audqcelp_release,
38199diff -urNp linux-2.6.32.48/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.48/drivers/staging/dream/qdsp5/snd.c
38200--- linux-2.6.32.48/drivers/staging/dream/qdsp5/snd.c 2011-11-08 19:02:43.000000000 -0500
38201+++ linux-2.6.32.48/drivers/staging/dream/qdsp5/snd.c 2011-11-15 19:59:43.000000000 -0500
38202@@ -242,7 +242,7 @@ err:
38203 return rc;
38204 }
38205
38206-static struct file_operations snd_fops = {
38207+static const struct file_operations snd_fops = {
38208 .owner = THIS_MODULE,
38209 .open = snd_open,
38210 .release = snd_release,
38211diff -urNp linux-2.6.32.48/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.48/drivers/staging/dream/smd/smd_qmi.c
38212--- linux-2.6.32.48/drivers/staging/dream/smd/smd_qmi.c 2011-11-08 19:02:43.000000000 -0500
38213+++ linux-2.6.32.48/drivers/staging/dream/smd/smd_qmi.c 2011-11-15 19:59:43.000000000 -0500
38214@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
38215 return 0;
38216 }
38217
38218-static struct file_operations qmi_fops = {
38219+static const struct file_operations qmi_fops = {
38220 .owner = THIS_MODULE,
38221 .read = qmi_read,
38222 .write = qmi_write,
38223diff -urNp linux-2.6.32.48/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.48/drivers/staging/dream/smd/smd_rpcrouter_device.c
38224--- linux-2.6.32.48/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-11-08 19:02:43.000000000 -0500
38225+++ linux-2.6.32.48/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-11-15 19:59:43.000000000 -0500
38226@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
38227 return rc;
38228 }
38229
38230-static struct file_operations rpcrouter_server_fops = {
38231+static const struct file_operations rpcrouter_server_fops = {
38232 .owner = THIS_MODULE,
38233 .open = rpcrouter_open,
38234 .release = rpcrouter_release,
38235@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
38236 .unlocked_ioctl = rpcrouter_ioctl,
38237 };
38238
38239-static struct file_operations rpcrouter_router_fops = {
38240+static const struct file_operations rpcrouter_router_fops = {
38241 .owner = THIS_MODULE,
38242 .open = rpcrouter_open,
38243 .release = rpcrouter_release,
38244diff -urNp linux-2.6.32.48/drivers/staging/dst/dcore.c linux-2.6.32.48/drivers/staging/dst/dcore.c
38245--- linux-2.6.32.48/drivers/staging/dst/dcore.c 2011-11-08 19:02:43.000000000 -0500
38246+++ linux-2.6.32.48/drivers/staging/dst/dcore.c 2011-11-15 19:59:43.000000000 -0500
38247@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
38248 return 0;
38249 }
38250
38251-static struct block_device_operations dst_blk_ops = {
38252+static const struct block_device_operations dst_blk_ops = {
38253 .open = dst_bdev_open,
38254 .release = dst_bdev_release,
38255 .owner = THIS_MODULE,
38256@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
38257 n->size = ctl->size;
38258
38259 atomic_set(&n->refcnt, 1);
38260- atomic_long_set(&n->gen, 0);
38261+ atomic_long_set_unchecked(&n->gen, 0);
38262 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
38263
38264 err = dst_node_sysfs_init(n);
38265diff -urNp linux-2.6.32.48/drivers/staging/dst/trans.c linux-2.6.32.48/drivers/staging/dst/trans.c
38266--- linux-2.6.32.48/drivers/staging/dst/trans.c 2011-11-08 19:02:43.000000000 -0500
38267+++ linux-2.6.32.48/drivers/staging/dst/trans.c 2011-11-15 19:59:43.000000000 -0500
38268@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
38269 t->error = 0;
38270 t->retries = 0;
38271 atomic_set(&t->refcnt, 1);
38272- t->gen = atomic_long_inc_return(&n->gen);
38273+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
38274
38275 t->enc = bio_data_dir(bio);
38276 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
38277diff -urNp linux-2.6.32.48/drivers/staging/et131x/et1310_tx.c linux-2.6.32.48/drivers/staging/et131x/et1310_tx.c
38278--- linux-2.6.32.48/drivers/staging/et131x/et1310_tx.c 2011-11-08 19:02:43.000000000 -0500
38279+++ linux-2.6.32.48/drivers/staging/et131x/et1310_tx.c 2011-11-15 19:59:43.000000000 -0500
38280@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
38281 struct net_device_stats *stats = &etdev->net_stats;
38282
38283 if (pMpTcb->Flags & fMP_DEST_BROAD)
38284- atomic_inc(&etdev->Stats.brdcstxmt);
38285+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
38286 else if (pMpTcb->Flags & fMP_DEST_MULTI)
38287- atomic_inc(&etdev->Stats.multixmt);
38288+ atomic_inc_unchecked(&etdev->Stats.multixmt);
38289 else
38290- atomic_inc(&etdev->Stats.unixmt);
38291+ atomic_inc_unchecked(&etdev->Stats.unixmt);
38292
38293 if (pMpTcb->Packet) {
38294 stats->tx_bytes += pMpTcb->Packet->len;
38295diff -urNp linux-2.6.32.48/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.48/drivers/staging/et131x/et131x_adapter.h
38296--- linux-2.6.32.48/drivers/staging/et131x/et131x_adapter.h 2011-11-08 19:02:43.000000000 -0500
38297+++ linux-2.6.32.48/drivers/staging/et131x/et131x_adapter.h 2011-11-15 19:59:43.000000000 -0500
38298@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
38299 * operations
38300 */
38301 u32 unircv; /* # multicast packets received */
38302- atomic_t unixmt; /* # multicast packets for Tx */
38303+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
38304 u32 multircv; /* # multicast packets received */
38305- atomic_t multixmt; /* # multicast packets for Tx */
38306+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
38307 u32 brdcstrcv; /* # broadcast packets received */
38308- atomic_t brdcstxmt; /* # broadcast packets for Tx */
38309+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
38310 u32 norcvbuf; /* # Rx packets discarded */
38311 u32 noxmtbuf; /* # Tx packets discarded */
38312
38313diff -urNp linux-2.6.32.48/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.48/drivers/staging/go7007/go7007-v4l2.c
38314--- linux-2.6.32.48/drivers/staging/go7007/go7007-v4l2.c 2011-11-08 19:02:43.000000000 -0500
38315+++ linux-2.6.32.48/drivers/staging/go7007/go7007-v4l2.c 2011-11-15 19:59:43.000000000 -0500
38316@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
38317 return 0;
38318 }
38319
38320-static struct vm_operations_struct go7007_vm_ops = {
38321+static const struct vm_operations_struct go7007_vm_ops = {
38322 .open = go7007_vm_open,
38323 .close = go7007_vm_close,
38324 .fault = go7007_vm_fault,
38325diff -urNp linux-2.6.32.48/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.48/drivers/staging/hv/blkvsc_drv.c
38326--- linux-2.6.32.48/drivers/staging/hv/blkvsc_drv.c 2011-11-08 19:02:43.000000000 -0500
38327+++ linux-2.6.32.48/drivers/staging/hv/blkvsc_drv.c 2011-11-15 19:59:43.000000000 -0500
38328@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
38329 /* The one and only one */
38330 static struct blkvsc_driver_context g_blkvsc_drv;
38331
38332-static struct block_device_operations block_ops = {
38333+static const struct block_device_operations block_ops = {
38334 .owner = THIS_MODULE,
38335 .open = blkvsc_open,
38336 .release = blkvsc_release,
38337diff -urNp linux-2.6.32.48/drivers/staging/hv/Channel.c linux-2.6.32.48/drivers/staging/hv/Channel.c
38338--- linux-2.6.32.48/drivers/staging/hv/Channel.c 2011-11-08 19:02:43.000000000 -0500
38339+++ linux-2.6.32.48/drivers/staging/hv/Channel.c 2011-11-15 19:59:43.000000000 -0500
38340@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
38341
38342 DPRINT_ENTER(VMBUS);
38343
38344- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
38345- atomic_inc(&gVmbusConnection.NextGpadlHandle);
38346+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
38347+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
38348
38349 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
38350 ASSERT(msgInfo != NULL);
38351diff -urNp linux-2.6.32.48/drivers/staging/hv/Hv.c linux-2.6.32.48/drivers/staging/hv/Hv.c
38352--- linux-2.6.32.48/drivers/staging/hv/Hv.c 2011-11-08 19:02:43.000000000 -0500
38353+++ linux-2.6.32.48/drivers/staging/hv/Hv.c 2011-11-15 19:59:43.000000000 -0500
38354@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
38355 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
38356 u32 outputAddressHi = outputAddress >> 32;
38357 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
38358- volatile void *hypercallPage = gHvContext.HypercallPage;
38359+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
38360
38361 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
38362 Control, Input, Output);
38363diff -urNp linux-2.6.32.48/drivers/staging/hv/VmbusApi.h linux-2.6.32.48/drivers/staging/hv/VmbusApi.h
38364--- linux-2.6.32.48/drivers/staging/hv/VmbusApi.h 2011-11-08 19:02:43.000000000 -0500
38365+++ linux-2.6.32.48/drivers/staging/hv/VmbusApi.h 2011-11-15 19:59:43.000000000 -0500
38366@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
38367 u32 *GpadlHandle);
38368 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
38369 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
38370-};
38371+} __no_const;
38372
38373 /* Base driver object */
38374 struct hv_driver {
38375diff -urNp linux-2.6.32.48/drivers/staging/hv/vmbus_drv.c linux-2.6.32.48/drivers/staging/hv/vmbus_drv.c
38376--- linux-2.6.32.48/drivers/staging/hv/vmbus_drv.c 2011-11-08 19:02:43.000000000 -0500
38377+++ linux-2.6.32.48/drivers/staging/hv/vmbus_drv.c 2011-11-15 19:59:43.000000000 -0500
38378@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
38379 to_device_context(root_device_obj);
38380 struct device_context *child_device_ctx =
38381 to_device_context(child_device_obj);
38382- static atomic_t device_num = ATOMIC_INIT(0);
38383+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
38384
38385 DPRINT_ENTER(VMBUS_DRV);
38386
38387@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
38388
38389 /* Set the device name. Otherwise, device_register() will fail. */
38390 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
38391- atomic_inc_return(&device_num));
38392+ atomic_inc_return_unchecked(&device_num));
38393
38394 /* The new device belongs to this bus */
38395 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
38396diff -urNp linux-2.6.32.48/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.48/drivers/staging/hv/VmbusPrivate.h
38397--- linux-2.6.32.48/drivers/staging/hv/VmbusPrivate.h 2011-11-08 19:02:43.000000000 -0500
38398+++ linux-2.6.32.48/drivers/staging/hv/VmbusPrivate.h 2011-11-15 19:59:43.000000000 -0500
38399@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
38400 struct VMBUS_CONNECTION {
38401 enum VMBUS_CONNECT_STATE ConnectState;
38402
38403- atomic_t NextGpadlHandle;
38404+ atomic_unchecked_t NextGpadlHandle;
38405
38406 /*
38407 * Represents channel interrupts. Each bit position represents a
38408diff -urNp linux-2.6.32.48/drivers/staging/iio/ring_generic.h linux-2.6.32.48/drivers/staging/iio/ring_generic.h
38409--- linux-2.6.32.48/drivers/staging/iio/ring_generic.h 2011-11-08 19:02:43.000000000 -0500
38410+++ linux-2.6.32.48/drivers/staging/iio/ring_generic.h 2011-11-15 19:59:43.000000000 -0500
38411@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
38412
38413 int (*is_enabled)(struct iio_ring_buffer *ring);
38414 int (*enable)(struct iio_ring_buffer *ring);
38415-};
38416+} __no_const;
38417
38418 /**
38419 * struct iio_ring_buffer - general ring buffer structure
38420diff -urNp linux-2.6.32.48/drivers/staging/octeon/ethernet.c linux-2.6.32.48/drivers/staging/octeon/ethernet.c
38421--- linux-2.6.32.48/drivers/staging/octeon/ethernet.c 2011-11-08 19:02:43.000000000 -0500
38422+++ linux-2.6.32.48/drivers/staging/octeon/ethernet.c 2011-11-15 19:59:43.000000000 -0500
38423@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
38424 * since the RX tasklet also increments it.
38425 */
38426 #ifdef CONFIG_64BIT
38427- atomic64_add(rx_status.dropped_packets,
38428- (atomic64_t *)&priv->stats.rx_dropped);
38429+ atomic64_add_unchecked(rx_status.dropped_packets,
38430+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38431 #else
38432- atomic_add(rx_status.dropped_packets,
38433- (atomic_t *)&priv->stats.rx_dropped);
38434+ atomic_add_unchecked(rx_status.dropped_packets,
38435+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38436 #endif
38437 }
38438
38439diff -urNp linux-2.6.32.48/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.48/drivers/staging/octeon/ethernet-rx.c
38440--- linux-2.6.32.48/drivers/staging/octeon/ethernet-rx.c 2011-11-08 19:02:43.000000000 -0500
38441+++ linux-2.6.32.48/drivers/staging/octeon/ethernet-rx.c 2011-11-15 19:59:43.000000000 -0500
38442@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
38443 /* Increment RX stats for virtual ports */
38444 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38445 #ifdef CONFIG_64BIT
38446- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38447- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38448+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38449+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38450 #else
38451- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38452- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38453+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38454+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38455 #endif
38456 }
38457 netif_receive_skb(skb);
38458@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
38459 dev->name);
38460 */
38461 #ifdef CONFIG_64BIT
38462- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38463+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38464 #else
38465- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38466+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
38467 #endif
38468 dev_kfree_skb_irq(skb);
38469 }
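/*
 * The RX path above bumps fields of struct net_device_stats (plain
 * unsigned long counters) by casting their addresses to atomic types,
 * choosing a 64-bit or 32-bit helper per architecture.  Since a packet
 * counter is allowed to wrap, the patch routes these through the
 * *_unchecked helpers so PAX_REFCOUNT does not treat the wrap as a
 * reference-count overflow.  A well-defined userspace sketch of the same
 * accounting (the field is declared atomic here instead of being cast):
 */
#include <stdatomic.h>

struct rx_stats {
        _Atomic unsigned long rx_dropped;
};

static void account_drops(struct rx_stats *s, unsigned long dropped)
{
        /* wrapping is fine for a statistic, so no overflow check */
        atomic_fetch_add_explicit(&s->rx_dropped, dropped,
                                  memory_order_relaxed);
}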
38470diff -urNp linux-2.6.32.48/drivers/staging/panel/panel.c linux-2.6.32.48/drivers/staging/panel/panel.c
38471--- linux-2.6.32.48/drivers/staging/panel/panel.c 2011-11-08 19:02:43.000000000 -0500
38472+++ linux-2.6.32.48/drivers/staging/panel/panel.c 2011-11-15 19:59:43.000000000 -0500
38473@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
38474 return 0;
38475 }
38476
38477-static struct file_operations lcd_fops = {
38478+static const struct file_operations lcd_fops = {
38479 .write = lcd_write,
38480 .open = lcd_open,
38481 .release = lcd_release,
38482@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
38483 return 0;
38484 }
38485
38486-static struct file_operations keypad_fops = {
38487+static const struct file_operations keypad_fops = {
38488 .read = keypad_read, /* read */
38489 .open = keypad_open, /* open */
38490 .release = keypad_release, /* close */
38491diff -urNp linux-2.6.32.48/drivers/staging/phison/phison.c linux-2.6.32.48/drivers/staging/phison/phison.c
38492--- linux-2.6.32.48/drivers/staging/phison/phison.c 2011-11-08 19:02:43.000000000 -0500
38493+++ linux-2.6.32.48/drivers/staging/phison/phison.c 2011-11-15 19:59:43.000000000 -0500
38494@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
38495 ATA_BMDMA_SHT(DRV_NAME),
38496 };
38497
38498-static struct ata_port_operations phison_ops = {
38499+static const struct ata_port_operations phison_ops = {
38500 .inherits = &ata_bmdma_port_ops,
38501 .prereset = phison_pre_reset,
38502 };
38503diff -urNp linux-2.6.32.48/drivers/staging/poch/poch.c linux-2.6.32.48/drivers/staging/poch/poch.c
38504--- linux-2.6.32.48/drivers/staging/poch/poch.c 2011-11-08 19:02:43.000000000 -0500
38505+++ linux-2.6.32.48/drivers/staging/poch/poch.c 2011-11-15 19:59:43.000000000 -0500
38506@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
38507 return 0;
38508 }
38509
38510-static struct file_operations poch_fops = {
38511+static const struct file_operations poch_fops = {
38512 .owner = THIS_MODULE,
38513 .open = poch_open,
38514 .release = poch_release,
38515diff -urNp linux-2.6.32.48/drivers/staging/pohmelfs/inode.c linux-2.6.32.48/drivers/staging/pohmelfs/inode.c
38516--- linux-2.6.32.48/drivers/staging/pohmelfs/inode.c 2011-11-08 19:02:43.000000000 -0500
38517+++ linux-2.6.32.48/drivers/staging/pohmelfs/inode.c 2011-11-15 19:59:43.000000000 -0500
38518@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
38519 mutex_init(&psb->mcache_lock);
38520 psb->mcache_root = RB_ROOT;
38521 psb->mcache_timeout = msecs_to_jiffies(5000);
38522- atomic_long_set(&psb->mcache_gen, 0);
38523+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
38524
38525 psb->trans_max_pages = 100;
38526
38527@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
38528 INIT_LIST_HEAD(&psb->crypto_ready_list);
38529 INIT_LIST_HEAD(&psb->crypto_active_list);
38530
38531- atomic_set(&psb->trans_gen, 1);
38532+ atomic_set_unchecked(&psb->trans_gen, 1);
38533 atomic_long_set(&psb->total_inodes, 0);
38534
38535 mutex_init(&psb->state_lock);
38536diff -urNp linux-2.6.32.48/drivers/staging/pohmelfs/mcache.c linux-2.6.32.48/drivers/staging/pohmelfs/mcache.c
38537--- linux-2.6.32.48/drivers/staging/pohmelfs/mcache.c 2011-11-08 19:02:43.000000000 -0500
38538+++ linux-2.6.32.48/drivers/staging/pohmelfs/mcache.c 2011-11-15 19:59:43.000000000 -0500
38539@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
38540 m->data = data;
38541 m->start = start;
38542 m->size = size;
38543- m->gen = atomic_long_inc_return(&psb->mcache_gen);
38544+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
38545
38546 mutex_lock(&psb->mcache_lock);
38547 err = pohmelfs_mcache_insert(psb, m);
38548diff -urNp linux-2.6.32.48/drivers/staging/pohmelfs/netfs.h linux-2.6.32.48/drivers/staging/pohmelfs/netfs.h
38549--- linux-2.6.32.48/drivers/staging/pohmelfs/netfs.h 2011-11-08 19:02:43.000000000 -0500
38550+++ linux-2.6.32.48/drivers/staging/pohmelfs/netfs.h 2011-11-15 19:59:43.000000000 -0500
38551@@ -570,14 +570,14 @@ struct pohmelfs_config;
38552 struct pohmelfs_sb {
38553 struct rb_root mcache_root;
38554 struct mutex mcache_lock;
38555- atomic_long_t mcache_gen;
38556+ atomic_long_unchecked_t mcache_gen;
38557 unsigned long mcache_timeout;
38558
38559 unsigned int idx;
38560
38561 unsigned int trans_retries;
38562
38563- atomic_t trans_gen;
38564+ atomic_unchecked_t trans_gen;
38565
38566 unsigned int crypto_attached_size;
38567 unsigned int crypto_align_size;
38568diff -urNp linux-2.6.32.48/drivers/staging/pohmelfs/trans.c linux-2.6.32.48/drivers/staging/pohmelfs/trans.c
38569--- linux-2.6.32.48/drivers/staging/pohmelfs/trans.c 2011-11-08 19:02:43.000000000 -0500
38570+++ linux-2.6.32.48/drivers/staging/pohmelfs/trans.c 2011-11-15 19:59:43.000000000 -0500
38571@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
38572 int err;
38573 struct netfs_cmd *cmd = t->iovec.iov_base;
38574
38575- t->gen = atomic_inc_return(&psb->trans_gen);
38576+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
38577
38578 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
38579 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
38580diff -urNp linux-2.6.32.48/drivers/staging/sep/sep_driver.c linux-2.6.32.48/drivers/staging/sep/sep_driver.c
38581--- linux-2.6.32.48/drivers/staging/sep/sep_driver.c 2011-11-08 19:02:43.000000000 -0500
38582+++ linux-2.6.32.48/drivers/staging/sep/sep_driver.c 2011-11-15 19:59:43.000000000 -0500
38583@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
38584 static dev_t sep_devno;
38585
38586 /* the files operations structure of the driver */
38587-static struct file_operations sep_file_operations = {
38588+static const struct file_operations sep_file_operations = {
38589 .owner = THIS_MODULE,
38590 .ioctl = sep_ioctl,
38591 .poll = sep_poll,
38592diff -urNp linux-2.6.32.48/drivers/staging/usbip/usbip_common.h linux-2.6.32.48/drivers/staging/usbip/usbip_common.h
38593--- linux-2.6.32.48/drivers/staging/usbip/usbip_common.h 2011-11-08 19:02:43.000000000 -0500
38594+++ linux-2.6.32.48/drivers/staging/usbip/usbip_common.h 2011-11-15 19:59:43.000000000 -0500
38595@@ -374,7 +374,7 @@ struct usbip_device {
38596 void (*shutdown)(struct usbip_device *);
38597 void (*reset)(struct usbip_device *);
38598 void (*unusable)(struct usbip_device *);
38599- } eh_ops;
38600+ } __no_const eh_ops;
38601 };
38602
38603
38604diff -urNp linux-2.6.32.48/drivers/staging/usbip/vhci.h linux-2.6.32.48/drivers/staging/usbip/vhci.h
38605--- linux-2.6.32.48/drivers/staging/usbip/vhci.h 2011-11-08 19:02:43.000000000 -0500
38606+++ linux-2.6.32.48/drivers/staging/usbip/vhci.h 2011-11-15 19:59:43.000000000 -0500
38607@@ -92,7 +92,7 @@ struct vhci_hcd {
38608 unsigned resuming:1;
38609 unsigned long re_timeout;
38610
38611- atomic_t seqnum;
38612+ atomic_unchecked_t seqnum;
38613
38614 /*
38615 * NOTE:
38616diff -urNp linux-2.6.32.48/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.48/drivers/staging/usbip/vhci_hcd.c
38617--- linux-2.6.32.48/drivers/staging/usbip/vhci_hcd.c 2011-11-08 19:02:43.000000000 -0500
38618+++ linux-2.6.32.48/drivers/staging/usbip/vhci_hcd.c 2011-11-15 19:59:43.000000000 -0500
38619@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
38620 return;
38621 }
38622
38623- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38624+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38625 if (priv->seqnum == 0xffff)
38626 usbip_uinfo("seqnum max\n");
38627
38628@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
38629 return -ENOMEM;
38630 }
38631
38632- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38633+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38634 if (unlink->seqnum == 0xffff)
38635 usbip_uinfo("seqnum max\n");
38636
38637@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38638 vdev->rhport = rhport;
38639 }
38640
38641- atomic_set(&vhci->seqnum, 0);
38642+ atomic_set_unchecked(&vhci->seqnum, 0);
38643 spin_lock_init(&vhci->lock);
38644
38645
38646diff -urNp linux-2.6.32.48/drivers/staging/usbip/vhci_rx.c linux-2.6.32.48/drivers/staging/usbip/vhci_rx.c
38647--- linux-2.6.32.48/drivers/staging/usbip/vhci_rx.c 2011-11-08 19:02:43.000000000 -0500
38648+++ linux-2.6.32.48/drivers/staging/usbip/vhci_rx.c 2011-11-15 19:59:43.000000000 -0500
38649@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38650 usbip_uerr("cannot find a urb of seqnum %u\n",
38651 pdu->base.seqnum);
38652 usbip_uinfo("max seqnum %d\n",
38653- atomic_read(&the_controller->seqnum));
38654+ atomic_read_unchecked(&the_controller->seqnum));
38655 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38656 return;
38657 }
38658diff -urNp linux-2.6.32.48/drivers/staging/vme/devices/vme_user.c linux-2.6.32.48/drivers/staging/vme/devices/vme_user.c
38659--- linux-2.6.32.48/drivers/staging/vme/devices/vme_user.c 2011-11-08 19:02:43.000000000 -0500
38660+++ linux-2.6.32.48/drivers/staging/vme/devices/vme_user.c 2011-11-15 19:59:43.000000000 -0500
38661@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38662 static int __init vme_user_probe(struct device *, int, int);
38663 static int __exit vme_user_remove(struct device *, int, int);
38664
38665-static struct file_operations vme_user_fops = {
38666+static const struct file_operations vme_user_fops = {
38667 .open = vme_user_open,
38668 .release = vme_user_release,
38669 .read = vme_user_read,
38670diff -urNp linux-2.6.32.48/drivers/staging/vt6655/hostap.c linux-2.6.32.48/drivers/staging/vt6655/hostap.c
38671--- linux-2.6.32.48/drivers/staging/vt6655/hostap.c 2011-11-08 19:02:43.000000000 -0500
38672+++ linux-2.6.32.48/drivers/staging/vt6655/hostap.c 2011-11-15 19:59:43.000000000 -0500
38673@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevic
38674 PSDevice apdev_priv;
38675 struct net_device *dev = pDevice->dev;
38676 int ret;
38677- const struct net_device_ops apdev_netdev_ops = {
38678+ net_device_ops_no_const apdev_netdev_ops = {
38679 .ndo_start_xmit = pDevice->tx_80211,
38680 };
38681
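/*
 * The hostap hunk above (and the vt6656 copy that follows) swaps the
 * "const struct net_device_ops" local for a non-const alias: under the
 * constify plugin net_device_ops is treated as always-const, yet this
 * ops object must be assembled at runtime because its xmit hook comes
 * from pDevice.  The alias is introduced elsewhere in the patch roughly
 * as below (sketch; it assumes struct net_device_ops is in scope):
 */
typedef struct net_device_ops __no_const net_device_ops_no_const;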
38682diff -urNp linux-2.6.32.48/drivers/staging/vt6656/hostap.c linux-2.6.32.48/drivers/staging/vt6656/hostap.c
38683--- linux-2.6.32.48/drivers/staging/vt6656/hostap.c 2011-11-08 19:02:43.000000000 -0500
38684+++ linux-2.6.32.48/drivers/staging/vt6656/hostap.c 2011-11-15 19:59:43.000000000 -0500
38685@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevic
38686 PSDevice apdev_priv;
38687 struct net_device *dev = pDevice->dev;
38688 int ret;
38689- const struct net_device_ops apdev_netdev_ops = {
38690+ net_device_ops_no_const apdev_netdev_ops = {
38691 .ndo_start_xmit = pDevice->tx_80211,
38692 };
38693
38694diff -urNp linux-2.6.32.48/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.48/drivers/staging/wlan-ng/hfa384x_usb.c
38695--- linux-2.6.32.48/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-08 19:02:43.000000000 -0500
38696+++ linux-2.6.32.48/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-15 19:59:43.000000000 -0500
38697@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
38698
38699 struct usbctlx_completor {
38700 int (*complete) (struct usbctlx_completor *);
38701-};
38702+} __no_const;
38703 typedef struct usbctlx_completor usbctlx_completor_t;
38704
38705 static int
38706diff -urNp linux-2.6.32.48/drivers/telephony/ixj.c linux-2.6.32.48/drivers/telephony/ixj.c
38707--- linux-2.6.32.48/drivers/telephony/ixj.c 2011-11-08 19:02:43.000000000 -0500
38708+++ linux-2.6.32.48/drivers/telephony/ixj.c 2011-11-15 19:59:43.000000000 -0500
38709@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38710 bool mContinue;
38711 char *pIn, *pOut;
38712
38713+ pax_track_stack();
38714+
38715 if (!SCI_Prepare(j))
38716 return 0;
38717
38718diff -urNp linux-2.6.32.48/drivers/uio/uio.c linux-2.6.32.48/drivers/uio/uio.c
38719--- linux-2.6.32.48/drivers/uio/uio.c 2011-11-08 19:02:43.000000000 -0500
38720+++ linux-2.6.32.48/drivers/uio/uio.c 2011-11-15 19:59:43.000000000 -0500
38721@@ -23,6 +23,7 @@
38722 #include <linux/string.h>
38723 #include <linux/kobject.h>
38724 #include <linux/uio_driver.h>
38725+#include <asm/local.h>
38726
38727 #define UIO_MAX_DEVICES 255
38728
38729@@ -30,10 +31,10 @@ struct uio_device {
38730 struct module *owner;
38731 struct device *dev;
38732 int minor;
38733- atomic_t event;
38734+ atomic_unchecked_t event;
38735 struct fasync_struct *async_queue;
38736 wait_queue_head_t wait;
38737- int vma_count;
38738+ local_t vma_count;
38739 struct uio_info *info;
38740 struct kobject *map_dir;
38741 struct kobject *portio_dir;
38742@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38743 return entry->show(mem, buf);
38744 }
38745
38746-static struct sysfs_ops map_sysfs_ops = {
38747+static const struct sysfs_ops map_sysfs_ops = {
38748 .show = map_type_show,
38749 };
38750
38751@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38752 return entry->show(port, buf);
38753 }
38754
38755-static struct sysfs_ops portio_sysfs_ops = {
38756+static const struct sysfs_ops portio_sysfs_ops = {
38757 .show = portio_type_show,
38758 };
38759
38760@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38761 struct uio_device *idev = dev_get_drvdata(dev);
38762 if (idev)
38763 return sprintf(buf, "%u\n",
38764- (unsigned int)atomic_read(&idev->event));
38765+ (unsigned int)atomic_read_unchecked(&idev->event));
38766 else
38767 return -ENODEV;
38768 }
38769@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38770 {
38771 struct uio_device *idev = info->uio_dev;
38772
38773- atomic_inc(&idev->event);
38774+ atomic_inc_unchecked(&idev->event);
38775 wake_up_interruptible(&idev->wait);
38776 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38777 }
38778@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38779 }
38780
38781 listener->dev = idev;
38782- listener->event_count = atomic_read(&idev->event);
38783+ listener->event_count = atomic_read_unchecked(&idev->event);
38784 filep->private_data = listener;
38785
38786 if (idev->info->open) {
38787@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38788 return -EIO;
38789
38790 poll_wait(filep, &idev->wait, wait);
38791- if (listener->event_count != atomic_read(&idev->event))
38792+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38793 return POLLIN | POLLRDNORM;
38794 return 0;
38795 }
38796@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38797 do {
38798 set_current_state(TASK_INTERRUPTIBLE);
38799
38800- event_count = atomic_read(&idev->event);
38801+ event_count = atomic_read_unchecked(&idev->event);
38802 if (event_count != listener->event_count) {
38803 if (copy_to_user(buf, &event_count, count))
38804 retval = -EFAULT;
38805@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38806 static void uio_vma_open(struct vm_area_struct *vma)
38807 {
38808 struct uio_device *idev = vma->vm_private_data;
38809- idev->vma_count++;
38810+ local_inc(&idev->vma_count);
38811 }
38812
38813 static void uio_vma_close(struct vm_area_struct *vma)
38814 {
38815 struct uio_device *idev = vma->vm_private_data;
38816- idev->vma_count--;
38817+ local_dec(&idev->vma_count);
38818 }
38819
38820 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38821@@ -840,7 +841,7 @@ int __uio_register_device(struct module
38822 idev->owner = owner;
38823 idev->info = info;
38824 init_waitqueue_head(&idev->wait);
38825- atomic_set(&idev->event, 0);
38826+ atomic_set_unchecked(&idev->event, 0);
38827
38828 ret = uio_get_minor(idev);
38829 if (ret)
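/*
 * Two changes run through the uio hunks above: the informational event
 * counter becomes atomic_unchecked_t (it may wrap), and vma_count goes
 * from a plain int to a local_t (hence the new <asm/local.h> include),
 * so the updates from uio_vma_open()/uio_vma_close() are real atomic
 * read-modify-write operations instead of racy ++/--.  A simplified,
 * compilable stand-in for the local_t usage:
 */
#include <stdio.h>

typedef struct { long counter; } local_t;   /* stand-in, not the kernel type */

static inline void local_inc(local_t *l) { __sync_fetch_and_add(&l->counter, 1); }
static inline void local_dec(local_t *l) { __sync_fetch_and_sub(&l->counter, 1); }

int main(void)
{
        local_t vma_count = { 0 };

        local_inc(&vma_count);          /* uio_vma_open()  */
        local_inc(&vma_count);
        local_dec(&vma_count);          /* uio_vma_close() */
        printf("open mappings: %ld\n", vma_count.counter);
        return 0;
}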
38830diff -urNp linux-2.6.32.48/drivers/usb/atm/usbatm.c linux-2.6.32.48/drivers/usb/atm/usbatm.c
38831--- linux-2.6.32.48/drivers/usb/atm/usbatm.c 2011-11-08 19:02:43.000000000 -0500
38832+++ linux-2.6.32.48/drivers/usb/atm/usbatm.c 2011-11-15 19:59:43.000000000 -0500
38833@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38834 if (printk_ratelimit())
38835 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38836 __func__, vpi, vci);
38837- atomic_inc(&vcc->stats->rx_err);
38838+ atomic_inc_unchecked(&vcc->stats->rx_err);
38839 return;
38840 }
38841
38842@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38843 if (length > ATM_MAX_AAL5_PDU) {
38844 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38845 __func__, length, vcc);
38846- atomic_inc(&vcc->stats->rx_err);
38847+ atomic_inc_unchecked(&vcc->stats->rx_err);
38848 goto out;
38849 }
38850
38851@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38852 if (sarb->len < pdu_length) {
38853 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38854 __func__, pdu_length, sarb->len, vcc);
38855- atomic_inc(&vcc->stats->rx_err);
38856+ atomic_inc_unchecked(&vcc->stats->rx_err);
38857 goto out;
38858 }
38859
38860 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38861 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38862 __func__, vcc);
38863- atomic_inc(&vcc->stats->rx_err);
38864+ atomic_inc_unchecked(&vcc->stats->rx_err);
38865 goto out;
38866 }
38867
38868@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38869 if (printk_ratelimit())
38870 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38871 __func__, length);
38872- atomic_inc(&vcc->stats->rx_drop);
38873+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38874 goto out;
38875 }
38876
38877@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38878
38879 vcc->push(vcc, skb);
38880
38881- atomic_inc(&vcc->stats->rx);
38882+ atomic_inc_unchecked(&vcc->stats->rx);
38883 out:
38884 skb_trim(sarb, 0);
38885 }
38886@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38887 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38888
38889 usbatm_pop(vcc, skb);
38890- atomic_inc(&vcc->stats->tx);
38891+ atomic_inc_unchecked(&vcc->stats->tx);
38892
38893 skb = skb_dequeue(&instance->sndqueue);
38894 }
38895@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38896 if (!left--)
38897 return sprintf(page,
38898 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38899- atomic_read(&atm_dev->stats.aal5.tx),
38900- atomic_read(&atm_dev->stats.aal5.tx_err),
38901- atomic_read(&atm_dev->stats.aal5.rx),
38902- atomic_read(&atm_dev->stats.aal5.rx_err),
38903- atomic_read(&atm_dev->stats.aal5.rx_drop));
38904+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38905+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38906+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38907+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38908+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38909
38910 if (!left--) {
38911 if (instance->disconnected)
38912diff -urNp linux-2.6.32.48/drivers/usb/class/cdc-wdm.c linux-2.6.32.48/drivers/usb/class/cdc-wdm.c
38913--- linux-2.6.32.48/drivers/usb/class/cdc-wdm.c 2011-11-08 19:02:43.000000000 -0500
38914+++ linux-2.6.32.48/drivers/usb/class/cdc-wdm.c 2011-11-15 19:59:43.000000000 -0500
38915@@ -314,7 +314,7 @@ static ssize_t wdm_write
38916 if (r < 0)
38917 goto outnp;
38918
38919- if (!file->f_flags && O_NONBLOCK)
38920+ if (!(file->f_flags & O_NONBLOCK))
38921 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38922 &desc->flags));
38923 else
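/*
 * The cdc-wdm hunk above is a straight logic fix: "!file->f_flags &&
 * O_NONBLOCK" parses as "(!f_flags) && O_NONBLOCK", which is true only
 * when no open flags are set at all, so the blocking wait was effectively
 * never taken.  The rewritten condition tests the O_NONBLOCK bit
 * properly.  A small demonstration:
 */
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        unsigned int f_flags = O_RDWR;          /* a blocking open */

        printf("buggy test: %d\n", !f_flags && O_NONBLOCK);    /* 0 - wrong */
        printf("fixed test: %d\n", !(f_flags & O_NONBLOCK));   /* 1 - right */
        return 0;
}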
38924diff -urNp linux-2.6.32.48/drivers/usb/core/hcd.c linux-2.6.32.48/drivers/usb/core/hcd.c
38925--- linux-2.6.32.48/drivers/usb/core/hcd.c 2011-11-08 19:02:43.000000000 -0500
38926+++ linux-2.6.32.48/drivers/usb/core/hcd.c 2011-11-15 19:59:43.000000000 -0500
38927@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38928
38929 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38930
38931-struct usb_mon_operations *mon_ops;
38932+const struct usb_mon_operations *mon_ops;
38933
38934 /*
38935 * The registration is unlocked.
38936@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38937 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38938 */
38939
38940-int usb_mon_register (struct usb_mon_operations *ops)
38941+int usb_mon_register (const struct usb_mon_operations *ops)
38942 {
38943
38944 if (mon_ops)
38945diff -urNp linux-2.6.32.48/drivers/usb/core/hcd.h linux-2.6.32.48/drivers/usb/core/hcd.h
38946--- linux-2.6.32.48/drivers/usb/core/hcd.h 2011-11-08 19:02:43.000000000 -0500
38947+++ linux-2.6.32.48/drivers/usb/core/hcd.h 2011-11-15 19:59:43.000000000 -0500
38948@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38949 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38950
38951 struct usb_mon_operations {
38952- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38953- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38954- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38955+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38956+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38957+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38958 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38959 };
38960
38961-extern struct usb_mon_operations *mon_ops;
38962+extern const struct usb_mon_operations *mon_ops;
38963
38964 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38965 {
38966@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38967 (*mon_ops->urb_complete)(bus, urb, status);
38968 }
38969
38970-int usb_mon_register(struct usb_mon_operations *ops);
38971+int usb_mon_register(const struct usb_mon_operations *ops);
38972 void usb_mon_deregister(void);
38973
38974 #else
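The usb_mon changes above constify an operations table that is filled in once at build time; making the table const means the exported pointer and usb_mon_register() must accept a const-qualified pointer as well. A small sketch of the same shape with hypothetical names (demo_ops and demo_register are illustrative, not kernel symbols):

#include <stdio.h>

struct demo_ops {
	void (*handle)(int value);
};

static void demo_handle(int value)
{
	printf("handled %d\n", value);
}

/* The table never changes after initialization ... */
static const struct demo_ops demo_ops_0 = {
	.handle = demo_handle,
};

/* ... so the global pointer and the registration interface carry the
 * const qualifier too, mirroring mon_ops / usb_mon_register(). */
static const struct demo_ops *registered_ops;

static int demo_register(const struct demo_ops *ops)
{
	if (registered_ops)
		return -1;		/* already registered */
	registered_ops = ops;
	return 0;
}

int main(void)
{
	if (demo_register(&demo_ops_0) == 0)
		registered_ops->handle(42);
	return 0;
}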
38975diff -urNp linux-2.6.32.48/drivers/usb/core/message.c linux-2.6.32.48/drivers/usb/core/message.c
38976--- linux-2.6.32.48/drivers/usb/core/message.c 2011-11-08 19:02:43.000000000 -0500
38977+++ linux-2.6.32.48/drivers/usb/core/message.c 2011-11-15 19:59:43.000000000 -0500
38978@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38979 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38980 if (buf) {
38981 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38982- if (len > 0) {
38983- smallbuf = kmalloc(++len, GFP_NOIO);
38984+ if (len++ > 0) {
38985+ smallbuf = kmalloc(len, GFP_NOIO);
38986 if (!smallbuf)
38987 return buf;
38988 memcpy(smallbuf, buf, len);
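The usb_cache_string() hunk folds the "+1 for the terminating NUL" into the length test: the post-increment compares the value returned by usb_string() and then bumps len, so both the kmalloc() size and the memcpy() length already include the terminator. A tiny standalone sketch of that post-increment pattern (plain libc stand-ins, not the kernel helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *s = "usb";
	int len = (int)strlen(s);	/* stands in for usb_string()'s result */

	if (len++ > 0) {		/* tests 3, then leaves len == 4 */
		char *copy = malloc(len);
		if (!copy)
			return 1;
		memcpy(copy, s, len);	/* copies "usb" plus its NUL */
		printf("%s (%d bytes)\n", copy, len);
		free(copy);
	}
	return 0;
}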
38989diff -urNp linux-2.6.32.48/drivers/usb/misc/appledisplay.c linux-2.6.32.48/drivers/usb/misc/appledisplay.c
38990--- linux-2.6.32.48/drivers/usb/misc/appledisplay.c 2011-11-08 19:02:43.000000000 -0500
38991+++ linux-2.6.32.48/drivers/usb/misc/appledisplay.c 2011-11-15 19:59:43.000000000 -0500
38992@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38993 return pdata->msgdata[1];
38994 }
38995
38996-static struct backlight_ops appledisplay_bl_data = {
38997+static const struct backlight_ops appledisplay_bl_data = {
38998 .get_brightness = appledisplay_bl_get_brightness,
38999 .update_status = appledisplay_bl_update_status,
39000 };
39001diff -urNp linux-2.6.32.48/drivers/usb/mon/mon_main.c linux-2.6.32.48/drivers/usb/mon/mon_main.c
39002--- linux-2.6.32.48/drivers/usb/mon/mon_main.c 2011-11-08 19:02:43.000000000 -0500
39003+++ linux-2.6.32.48/drivers/usb/mon/mon_main.c 2011-11-15 19:59:43.000000000 -0500
39004@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
39005 /*
39006 * Ops
39007 */
39008-static struct usb_mon_operations mon_ops_0 = {
39009+static const struct usb_mon_operations mon_ops_0 = {
39010 .urb_submit = mon_submit,
39011 .urb_submit_error = mon_submit_error,
39012 .urb_complete = mon_complete,
39013diff -urNp linux-2.6.32.48/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.48/drivers/usb/wusbcore/wa-hc.h
39014--- linux-2.6.32.48/drivers/usb/wusbcore/wa-hc.h 2011-11-08 19:02:43.000000000 -0500
39015+++ linux-2.6.32.48/drivers/usb/wusbcore/wa-hc.h 2011-11-15 19:59:43.000000000 -0500
39016@@ -192,7 +192,7 @@ struct wahc {
39017 struct list_head xfer_delayed_list;
39018 spinlock_t xfer_list_lock;
39019 struct work_struct xfer_work;
39020- atomic_t xfer_id_count;
39021+ atomic_unchecked_t xfer_id_count;
39022 };
39023
39024
39025@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
39026 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39027 spin_lock_init(&wa->xfer_list_lock);
39028 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39029- atomic_set(&wa->xfer_id_count, 1);
39030+ atomic_set_unchecked(&wa->xfer_id_count, 1);
39031 }
39032
39033 /**
39034diff -urNp linux-2.6.32.48/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.48/drivers/usb/wusbcore/wa-xfer.c
39035--- linux-2.6.32.48/drivers/usb/wusbcore/wa-xfer.c 2011-11-08 19:02:43.000000000 -0500
39036+++ linux-2.6.32.48/drivers/usb/wusbcore/wa-xfer.c 2011-11-15 19:59:43.000000000 -0500
39037@@ -293,7 +293,7 @@ out:
39038 */
39039 static void wa_xfer_id_init(struct wa_xfer *xfer)
39040 {
39041- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39042+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39043 }
39044
39045 /*
39046diff -urNp linux-2.6.32.48/drivers/uwb/wlp/messages.c linux-2.6.32.48/drivers/uwb/wlp/messages.c
39047--- linux-2.6.32.48/drivers/uwb/wlp/messages.c 2011-11-08 19:02:43.000000000 -0500
39048+++ linux-2.6.32.48/drivers/uwb/wlp/messages.c 2011-11-15 19:59:43.000000000 -0500
39049@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
39050 size_t len = skb->len;
39051 size_t used;
39052 ssize_t result;
39053- struct wlp_nonce enonce, rnonce;
39054+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
39055 enum wlp_assc_error assc_err;
39056 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
39057 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
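The wlp_parse_f0() hunk zero-initializes the two on-stack nonces before parsing; the nested-brace initializer suggests wlp_nonce wraps a byte array, so the change ensures that formatting the nonces into enonce_buf/rnonce_buf cannot print uninitialized stack bytes on an early-exit path. A minimal sketch with a stand-in struct (the 16-byte size is only an assumption for illustration):

#include <stdio.h>

struct nonce {
	unsigned char data[16];
};

int main(void)
{
	struct nonce enonce = {{0}};	/* every byte of the array is zeroed */
	struct nonce rnonce = {{0}};

	for (size_t i = 0; i < sizeof(enonce.data); i++)
		printf("%02x", enonce.data[i]);
	printf(" / first rnonce byte: %02x\n", rnonce.data[0]);
	return 0;
}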
39058diff -urNp linux-2.6.32.48/drivers/uwb/wlp/sysfs.c linux-2.6.32.48/drivers/uwb/wlp/sysfs.c
39059--- linux-2.6.32.48/drivers/uwb/wlp/sysfs.c 2011-11-08 19:02:43.000000000 -0500
39060+++ linux-2.6.32.48/drivers/uwb/wlp/sysfs.c 2011-11-15 19:59:43.000000000 -0500
39061@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
39062 return ret;
39063 }
39064
39065-static
39066-struct sysfs_ops wss_sysfs_ops = {
39067+static const struct sysfs_ops wss_sysfs_ops = {
39068 .show = wlp_wss_attr_show,
39069 .store = wlp_wss_attr_store,
39070 };
39071diff -urNp linux-2.6.32.48/drivers/video/atmel_lcdfb.c linux-2.6.32.48/drivers/video/atmel_lcdfb.c
39072--- linux-2.6.32.48/drivers/video/atmel_lcdfb.c 2011-11-08 19:02:43.000000000 -0500
39073+++ linux-2.6.32.48/drivers/video/atmel_lcdfb.c 2011-11-15 19:59:43.000000000 -0500
39074@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
39075 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
39076 }
39077
39078-static struct backlight_ops atmel_lcdc_bl_ops = {
39079+static const struct backlight_ops atmel_lcdc_bl_ops = {
39080 .update_status = atmel_bl_update_status,
39081 .get_brightness = atmel_bl_get_brightness,
39082 };
39083diff -urNp linux-2.6.32.48/drivers/video/aty/aty128fb.c linux-2.6.32.48/drivers/video/aty/aty128fb.c
39084--- linux-2.6.32.48/drivers/video/aty/aty128fb.c 2011-11-08 19:02:43.000000000 -0500
39085+++ linux-2.6.32.48/drivers/video/aty/aty128fb.c 2011-11-15 19:59:43.000000000 -0500
39086@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
39087 return bd->props.brightness;
39088 }
39089
39090-static struct backlight_ops aty128_bl_data = {
39091+static const struct backlight_ops aty128_bl_data = {
39092 .get_brightness = aty128_bl_get_brightness,
39093 .update_status = aty128_bl_update_status,
39094 };
39095diff -urNp linux-2.6.32.48/drivers/video/aty/atyfb_base.c linux-2.6.32.48/drivers/video/aty/atyfb_base.c
39096--- linux-2.6.32.48/drivers/video/aty/atyfb_base.c 2011-11-08 19:02:43.000000000 -0500
39097+++ linux-2.6.32.48/drivers/video/aty/atyfb_base.c 2011-11-15 19:59:43.000000000 -0500
39098@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
39099 return bd->props.brightness;
39100 }
39101
39102-static struct backlight_ops aty_bl_data = {
39103+static const struct backlight_ops aty_bl_data = {
39104 .get_brightness = aty_bl_get_brightness,
39105 .update_status = aty_bl_update_status,
39106 };
39107diff -urNp linux-2.6.32.48/drivers/video/aty/radeon_backlight.c linux-2.6.32.48/drivers/video/aty/radeon_backlight.c
39108--- linux-2.6.32.48/drivers/video/aty/radeon_backlight.c 2011-11-08 19:02:43.000000000 -0500
39109+++ linux-2.6.32.48/drivers/video/aty/radeon_backlight.c 2011-11-15 19:59:43.000000000 -0500
39110@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
39111 return bd->props.brightness;
39112 }
39113
39114-static struct backlight_ops radeon_bl_data = {
39115+static const struct backlight_ops radeon_bl_data = {
39116 .get_brightness = radeon_bl_get_brightness,
39117 .update_status = radeon_bl_update_status,
39118 };
39119diff -urNp linux-2.6.32.48/drivers/video/backlight/adp5520_bl.c linux-2.6.32.48/drivers/video/backlight/adp5520_bl.c
39120--- linux-2.6.32.48/drivers/video/backlight/adp5520_bl.c 2011-11-08 19:02:43.000000000 -0500
39121+++ linux-2.6.32.48/drivers/video/backlight/adp5520_bl.c 2011-11-15 19:59:43.000000000 -0500
39122@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
39123 return error ? data->current_brightness : reg_val;
39124 }
39125
39126-static struct backlight_ops adp5520_bl_ops = {
39127+static const struct backlight_ops adp5520_bl_ops = {
39128 .update_status = adp5520_bl_update_status,
39129 .get_brightness = adp5520_bl_get_brightness,
39130 };
39131diff -urNp linux-2.6.32.48/drivers/video/backlight/adx_bl.c linux-2.6.32.48/drivers/video/backlight/adx_bl.c
39132--- linux-2.6.32.48/drivers/video/backlight/adx_bl.c 2011-11-08 19:02:43.000000000 -0500
39133+++ linux-2.6.32.48/drivers/video/backlight/adx_bl.c 2011-11-15 19:59:43.000000000 -0500
39134@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
39135 return 1;
39136 }
39137
39138-static struct backlight_ops adx_backlight_ops = {
39139+static const struct backlight_ops adx_backlight_ops = {
39140 .options = 0,
39141 .update_status = adx_backlight_update_status,
39142 .get_brightness = adx_backlight_get_brightness,
39143diff -urNp linux-2.6.32.48/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.48/drivers/video/backlight/atmel-pwm-bl.c
39144--- linux-2.6.32.48/drivers/video/backlight/atmel-pwm-bl.c 2011-11-08 19:02:43.000000000 -0500
39145+++ linux-2.6.32.48/drivers/video/backlight/atmel-pwm-bl.c 2011-11-15 19:59:43.000000000 -0500
39146@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
39147 return pwm_channel_enable(&pwmbl->pwmc);
39148 }
39149
39150-static struct backlight_ops atmel_pwm_bl_ops = {
39151+static const struct backlight_ops atmel_pwm_bl_ops = {
39152 .get_brightness = atmel_pwm_bl_get_intensity,
39153 .update_status = atmel_pwm_bl_set_intensity,
39154 };
39155diff -urNp linux-2.6.32.48/drivers/video/backlight/backlight.c linux-2.6.32.48/drivers/video/backlight/backlight.c
39156--- linux-2.6.32.48/drivers/video/backlight/backlight.c 2011-11-08 19:02:43.000000000 -0500
39157+++ linux-2.6.32.48/drivers/video/backlight/backlight.c 2011-11-15 19:59:43.000000000 -0500
39158@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
39159 * ERR_PTR() or a pointer to the newly allocated device.
39160 */
39161 struct backlight_device *backlight_device_register(const char *name,
39162- struct device *parent, void *devdata, struct backlight_ops *ops)
39163+ struct device *parent, void *devdata, const struct backlight_ops *ops)
39164 {
39165 struct backlight_device *new_bd;
39166 int rc;
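Because every driver's backlight_ops table in the surrounding hunks becomes const, the backlight core's registration prototype has to change in step: passing a pointer to a const table into a parameter declared without the qualifier would discard const and warn. A short sketch of that propagation with hypothetical names (bl_ops and bl_register are illustrative only):

struct bl_ops {
	int (*get_brightness)(void);
};

static int stub_get_brightness(void)
{
	return 50;
}

static const struct bl_ops my_bl_ops = {	/* like the driver tables above */
	.get_brightness = stub_get_brightness,
};

/* old shape:  int bl_register(struct bl_ops *ops);
 *   bl_register(&my_bl_ops) would discard the const qualifier.
 * new shape, matching the updated backlight_device_register(): */
static int bl_register(const struct bl_ops *ops)
{
	return ops->get_brightness();
}

int main(void)
{
	return bl_register(&my_bl_ops) == 50 ? 0 : 1;
}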
39167diff -urNp linux-2.6.32.48/drivers/video/backlight/corgi_lcd.c linux-2.6.32.48/drivers/video/backlight/corgi_lcd.c
39168--- linux-2.6.32.48/drivers/video/backlight/corgi_lcd.c 2011-11-08 19:02:43.000000000 -0500
39169+++ linux-2.6.32.48/drivers/video/backlight/corgi_lcd.c 2011-11-15 19:59:43.000000000 -0500
39170@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
39171 }
39172 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
39173
39174-static struct backlight_ops corgi_bl_ops = {
39175+static const struct backlight_ops corgi_bl_ops = {
39176 .get_brightness = corgi_bl_get_intensity,
39177 .update_status = corgi_bl_update_status,
39178 };
39179diff -urNp linux-2.6.32.48/drivers/video/backlight/cr_bllcd.c linux-2.6.32.48/drivers/video/backlight/cr_bllcd.c
39180--- linux-2.6.32.48/drivers/video/backlight/cr_bllcd.c 2011-11-08 19:02:43.000000000 -0500
39181+++ linux-2.6.32.48/drivers/video/backlight/cr_bllcd.c 2011-11-15 19:59:43.000000000 -0500
39182@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
39183 return intensity;
39184 }
39185
39186-static struct backlight_ops cr_backlight_ops = {
39187+static const struct backlight_ops cr_backlight_ops = {
39188 .get_brightness = cr_backlight_get_intensity,
39189 .update_status = cr_backlight_set_intensity,
39190 };
39191diff -urNp linux-2.6.32.48/drivers/video/backlight/da903x_bl.c linux-2.6.32.48/drivers/video/backlight/da903x_bl.c
39192--- linux-2.6.32.48/drivers/video/backlight/da903x_bl.c 2011-11-08 19:02:43.000000000 -0500
39193+++ linux-2.6.32.48/drivers/video/backlight/da903x_bl.c 2011-11-15 19:59:43.000000000 -0500
39194@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
39195 return data->current_brightness;
39196 }
39197
39198-static struct backlight_ops da903x_backlight_ops = {
39199+static const struct backlight_ops da903x_backlight_ops = {
39200 .update_status = da903x_backlight_update_status,
39201 .get_brightness = da903x_backlight_get_brightness,
39202 };
39203diff -urNp linux-2.6.32.48/drivers/video/backlight/generic_bl.c linux-2.6.32.48/drivers/video/backlight/generic_bl.c
39204--- linux-2.6.32.48/drivers/video/backlight/generic_bl.c 2011-11-08 19:02:43.000000000 -0500
39205+++ linux-2.6.32.48/drivers/video/backlight/generic_bl.c 2011-11-15 19:59:43.000000000 -0500
39206@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
39207 }
39208 EXPORT_SYMBOL(corgibl_limit_intensity);
39209
39210-static struct backlight_ops genericbl_ops = {
39211+static const struct backlight_ops genericbl_ops = {
39212 .options = BL_CORE_SUSPENDRESUME,
39213 .get_brightness = genericbl_get_intensity,
39214 .update_status = genericbl_send_intensity,
39215diff -urNp linux-2.6.32.48/drivers/video/backlight/hp680_bl.c linux-2.6.32.48/drivers/video/backlight/hp680_bl.c
39216--- linux-2.6.32.48/drivers/video/backlight/hp680_bl.c 2011-11-08 19:02:43.000000000 -0500
39217+++ linux-2.6.32.48/drivers/video/backlight/hp680_bl.c 2011-11-15 19:59:43.000000000 -0500
39218@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
39219 return current_intensity;
39220 }
39221
39222-static struct backlight_ops hp680bl_ops = {
39223+static const struct backlight_ops hp680bl_ops = {
39224 .get_brightness = hp680bl_get_intensity,
39225 .update_status = hp680bl_set_intensity,
39226 };
39227diff -urNp linux-2.6.32.48/drivers/video/backlight/jornada720_bl.c linux-2.6.32.48/drivers/video/backlight/jornada720_bl.c
39228--- linux-2.6.32.48/drivers/video/backlight/jornada720_bl.c 2011-11-08 19:02:43.000000000 -0500
39229+++ linux-2.6.32.48/drivers/video/backlight/jornada720_bl.c 2011-11-15 19:59:43.000000000 -0500
39230@@ -93,7 +93,7 @@ out:
39231 return ret;
39232 }
39233
39234-static struct backlight_ops jornada_bl_ops = {
39235+static const struct backlight_ops jornada_bl_ops = {
39236 .get_brightness = jornada_bl_get_brightness,
39237 .update_status = jornada_bl_update_status,
39238 .options = BL_CORE_SUSPENDRESUME,
39239diff -urNp linux-2.6.32.48/drivers/video/backlight/kb3886_bl.c linux-2.6.32.48/drivers/video/backlight/kb3886_bl.c
39240--- linux-2.6.32.48/drivers/video/backlight/kb3886_bl.c 2011-11-08 19:02:43.000000000 -0500
39241+++ linux-2.6.32.48/drivers/video/backlight/kb3886_bl.c 2011-11-15 19:59:43.000000000 -0500
39242@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
39243 return kb3886bl_intensity;
39244 }
39245
39246-static struct backlight_ops kb3886bl_ops = {
39247+static const struct backlight_ops kb3886bl_ops = {
39248 .get_brightness = kb3886bl_get_intensity,
39249 .update_status = kb3886bl_send_intensity,
39250 };
39251diff -urNp linux-2.6.32.48/drivers/video/backlight/locomolcd.c linux-2.6.32.48/drivers/video/backlight/locomolcd.c
39252--- linux-2.6.32.48/drivers/video/backlight/locomolcd.c 2011-11-08 19:02:43.000000000 -0500
39253+++ linux-2.6.32.48/drivers/video/backlight/locomolcd.c 2011-11-15 19:59:43.000000000 -0500
39254@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
39255 return current_intensity;
39256 }
39257
39258-static struct backlight_ops locomobl_data = {
39259+static const struct backlight_ops locomobl_data = {
39260 .get_brightness = locomolcd_get_intensity,
39261 .update_status = locomolcd_set_intensity,
39262 };
39263diff -urNp linux-2.6.32.48/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.48/drivers/video/backlight/mbp_nvidia_bl.c
39264--- linux-2.6.32.48/drivers/video/backlight/mbp_nvidia_bl.c 2011-11-08 19:02:43.000000000 -0500
39265+++ linux-2.6.32.48/drivers/video/backlight/mbp_nvidia_bl.c 2011-11-15 19:59:43.000000000 -0500
39266@@ -33,7 +33,7 @@ struct dmi_match_data {
39267 unsigned long iostart;
39268 unsigned long iolen;
39269 /* Backlight operations structure. */
39270- struct backlight_ops backlight_ops;
39271+ const struct backlight_ops backlight_ops;
39272 };
39273
39274 /* Module parameters. */
39275diff -urNp linux-2.6.32.48/drivers/video/backlight/omap1_bl.c linux-2.6.32.48/drivers/video/backlight/omap1_bl.c
39276--- linux-2.6.32.48/drivers/video/backlight/omap1_bl.c 2011-11-08 19:02:43.000000000 -0500
39277+++ linux-2.6.32.48/drivers/video/backlight/omap1_bl.c 2011-11-15 19:59:43.000000000 -0500
39278@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
39279 return bl->current_intensity;
39280 }
39281
39282-static struct backlight_ops omapbl_ops = {
39283+static const struct backlight_ops omapbl_ops = {
39284 .get_brightness = omapbl_get_intensity,
39285 .update_status = omapbl_update_status,
39286 };
39287diff -urNp linux-2.6.32.48/drivers/video/backlight/progear_bl.c linux-2.6.32.48/drivers/video/backlight/progear_bl.c
39288--- linux-2.6.32.48/drivers/video/backlight/progear_bl.c 2011-11-08 19:02:43.000000000 -0500
39289+++ linux-2.6.32.48/drivers/video/backlight/progear_bl.c 2011-11-15 19:59:43.000000000 -0500
39290@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
39291 return intensity - HW_LEVEL_MIN;
39292 }
39293
39294-static struct backlight_ops progearbl_ops = {
39295+static const struct backlight_ops progearbl_ops = {
39296 .get_brightness = progearbl_get_intensity,
39297 .update_status = progearbl_set_intensity,
39298 };
39299diff -urNp linux-2.6.32.48/drivers/video/backlight/pwm_bl.c linux-2.6.32.48/drivers/video/backlight/pwm_bl.c
39300--- linux-2.6.32.48/drivers/video/backlight/pwm_bl.c 2011-11-08 19:02:43.000000000 -0500
39301+++ linux-2.6.32.48/drivers/video/backlight/pwm_bl.c 2011-11-15 19:59:43.000000000 -0500
39302@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
39303 return bl->props.brightness;
39304 }
39305
39306-static struct backlight_ops pwm_backlight_ops = {
39307+static const struct backlight_ops pwm_backlight_ops = {
39308 .update_status = pwm_backlight_update_status,
39309 .get_brightness = pwm_backlight_get_brightness,
39310 };
39311diff -urNp linux-2.6.32.48/drivers/video/backlight/tosa_bl.c linux-2.6.32.48/drivers/video/backlight/tosa_bl.c
39312--- linux-2.6.32.48/drivers/video/backlight/tosa_bl.c 2011-11-08 19:02:43.000000000 -0500
39313+++ linux-2.6.32.48/drivers/video/backlight/tosa_bl.c 2011-11-15 19:59:43.000000000 -0500
39314@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
39315 return props->brightness;
39316 }
39317
39318-static struct backlight_ops bl_ops = {
39319+static const struct backlight_ops bl_ops = {
39320 .get_brightness = tosa_bl_get_brightness,
39321 .update_status = tosa_bl_update_status,
39322 };
39323diff -urNp linux-2.6.32.48/drivers/video/backlight/wm831x_bl.c linux-2.6.32.48/drivers/video/backlight/wm831x_bl.c
39324--- linux-2.6.32.48/drivers/video/backlight/wm831x_bl.c 2011-11-08 19:02:43.000000000 -0500
39325+++ linux-2.6.32.48/drivers/video/backlight/wm831x_bl.c 2011-11-15 19:59:43.000000000 -0500
39326@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
39327 return data->current_brightness;
39328 }
39329
39330-static struct backlight_ops wm831x_backlight_ops = {
39331+static const struct backlight_ops wm831x_backlight_ops = {
39332 .options = BL_CORE_SUSPENDRESUME,
39333 .update_status = wm831x_backlight_update_status,
39334 .get_brightness = wm831x_backlight_get_brightness,
39335diff -urNp linux-2.6.32.48/drivers/video/bf54x-lq043fb.c linux-2.6.32.48/drivers/video/bf54x-lq043fb.c
39336--- linux-2.6.32.48/drivers/video/bf54x-lq043fb.c 2011-11-08 19:02:43.000000000 -0500
39337+++ linux-2.6.32.48/drivers/video/bf54x-lq043fb.c 2011-11-15 19:59:43.000000000 -0500
39338@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
39339 return 0;
39340 }
39341
39342-static struct backlight_ops bfin_lq043fb_bl_ops = {
39343+static const struct backlight_ops bfin_lq043fb_bl_ops = {
39344 .get_brightness = bl_get_brightness,
39345 };
39346
39347diff -urNp linux-2.6.32.48/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.48/drivers/video/bfin-t350mcqb-fb.c
39348--- linux-2.6.32.48/drivers/video/bfin-t350mcqb-fb.c 2011-11-08 19:02:43.000000000 -0500
39349+++ linux-2.6.32.48/drivers/video/bfin-t350mcqb-fb.c 2011-11-15 19:59:43.000000000 -0500
39350@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
39351 return 0;
39352 }
39353
39354-static struct backlight_ops bfin_lq043fb_bl_ops = {
39355+static const struct backlight_ops bfin_lq043fb_bl_ops = {
39356 .get_brightness = bl_get_brightness,
39357 };
39358
39359diff -urNp linux-2.6.32.48/drivers/video/fbcmap.c linux-2.6.32.48/drivers/video/fbcmap.c
39360--- linux-2.6.32.48/drivers/video/fbcmap.c 2011-11-08 19:02:43.000000000 -0500
39361+++ linux-2.6.32.48/drivers/video/fbcmap.c 2011-11-15 19:59:43.000000000 -0500
39362@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
39363 rc = -ENODEV;
39364 goto out;
39365 }
39366- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39367- !info->fbops->fb_setcmap)) {
39368+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39369 rc = -EINVAL;
39370 goto out1;
39371 }
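The fb_set_user_cmap() hunk drops the "cmap->start < 0" half of the test: the field arrives from user space as an unsigned quantity, so the comparison can never be true and only earns a compiler warning; a "negative" value simply shows up as a very large positive number. A one-file illustration:

#include <stdio.h>

int main(void)
{
	unsigned int start = (unsigned int)-1;	/* user space passed -1 */

	printf("start < 0      : %d\n", start < 0);	/* always 0 */
	printf("start as number: %u\n", start);		/* 4294967295 */
	return 0;
}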
39372diff -urNp linux-2.6.32.48/drivers/video/fbmem.c linux-2.6.32.48/drivers/video/fbmem.c
39373--- linux-2.6.32.48/drivers/video/fbmem.c 2011-11-08 19:02:43.000000000 -0500
39374+++ linux-2.6.32.48/drivers/video/fbmem.c 2011-11-15 19:59:43.000000000 -0500
39375@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
39376 image->dx += image->width + 8;
39377 }
39378 } else if (rotate == FB_ROTATE_UD) {
39379- for (x = 0; x < num && image->dx >= 0; x++) {
39380+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39381 info->fbops->fb_imageblit(info, image);
39382 image->dx -= image->width + 8;
39383 }
39384@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
39385 image->dy += image->height + 8;
39386 }
39387 } else if (rotate == FB_ROTATE_CCW) {
39388- for (x = 0; x < num && image->dy >= 0; x++) {
39389+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39390 info->fbops->fb_imageblit(info, image);
39391 image->dy -= image->height + 8;
39392 }
39393@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
39394 int flags = info->flags;
39395 int ret = 0;
39396
39397+ pax_track_stack();
39398+
39399 if (var->activate & FB_ACTIVATE_INV_MODE) {
39400 struct fb_videomode mode1, mode2;
39401
39402@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
39403 void __user *argp = (void __user *)arg;
39404 long ret = 0;
39405
39406+ pax_track_stack();
39407+
39408 switch (cmd) {
39409 case FBIOGET_VSCREENINFO:
39410 if (!lock_fb_info(info))
39411@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
39412 return -EFAULT;
39413 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39414 return -EINVAL;
39415- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39416+ if (con2fb.framebuffer >= FB_MAX)
39417 return -EINVAL;
39418 if (!registered_fb[con2fb.framebuffer])
39419 request_module("fb%d", con2fb.framebuffer);
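The fbmem.c changes are mostly signedness fixes: fb_image's dx/dy are unsigned, so "image->dx >= 0" is always true and the rotated-logo loops could only stop via the count; casting to a signed 32-bit value lets the loop notice once the repeated subtraction has wrapped below zero. The dropped "con2fb.framebuffer < 0" test is the same always-false unsigned comparison as in fbcmap.c, and the pax_track_stack() calls appear to mark these large-stack-frame paths for the patch's stack-clearing feature. A small sketch of the loop-termination fix:

#include <stdio.h>

int main(void)
{
	unsigned int dx = 20;		/* stands in for image->dx */
	unsigned int step = 16;		/* image->width + 8 */
	int iterations = 0;

	/* Without the cast, "dx >= 0" is always true for an unsigned dx.
	 * With it, the wrap after the second subtraction reads as a
	 * negative value (on the usual two's-complement targets) and the
	 * loop stops. */
	while ((int)dx >= 0 && iterations < 10) {
		iterations++;
		dx -= step;		/* wraps to a huge value on underflow */
	}
	printf("stopped after %d iterations (dx = %u)\n", iterations, dx);
	return 0;
}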
39420diff -urNp linux-2.6.32.48/drivers/video/i810/i810_accel.c linux-2.6.32.48/drivers/video/i810/i810_accel.c
39421--- linux-2.6.32.48/drivers/video/i810/i810_accel.c 2011-11-08 19:02:43.000000000 -0500
39422+++ linux-2.6.32.48/drivers/video/i810/i810_accel.c 2011-11-15 19:59:43.000000000 -0500
39423@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
39424 }
39425 }
39426 printk("ringbuffer lockup!!!\n");
39427+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39428 i810_report_error(mmio);
39429 par->dev_flags |= LOCKUP;
39430 info->pixmap.scan_align = 1;
39431diff -urNp linux-2.6.32.48/drivers/video/logo/logo_linux_clut224.ppm linux-2.6.32.48/drivers/video/logo/logo_linux_clut224.ppm
39432--- linux-2.6.32.48/drivers/video/logo/logo_linux_clut224.ppm 2011-11-08 19:02:43.000000000 -0500
39433+++ linux-2.6.32.48/drivers/video/logo/logo_linux_clut224.ppm 2011-11-15 19:59:43.000000000 -0500
39434@@ -1,1604 +1,1123 @@
39435 P3
39436-# Standard 224-color Linux logo
39437 80 80
39438 255
39439- 0 0 0 0 0 0 0 0 0 0 0 0
39440- 0 0 0 0 0 0 0 0 0 0 0 0
39441- 0 0 0 0 0 0 0 0 0 0 0 0
39442- 0 0 0 0 0 0 0 0 0 0 0 0
39443- 0 0 0 0 0 0 0 0 0 0 0 0
39444- 0 0 0 0 0 0 0 0 0 0 0 0
39445- 0 0 0 0 0 0 0 0 0 0 0 0
39446- 0 0 0 0 0 0 0 0 0 0 0 0
39447- 0 0 0 0 0 0 0 0 0 0 0 0
39448- 6 6 6 6 6 6 10 10 10 10 10 10
39449- 10 10 10 6 6 6 6 6 6 6 6 6
39450- 0 0 0 0 0 0 0 0 0 0 0 0
39451- 0 0 0 0 0 0 0 0 0 0 0 0
39452- 0 0 0 0 0 0 0 0 0 0 0 0
39453- 0 0 0 0 0 0 0 0 0 0 0 0
39454- 0 0 0 0 0 0 0 0 0 0 0 0
39455- 0 0 0 0 0 0 0 0 0 0 0 0
39456- 0 0 0 0 0 0 0 0 0 0 0 0
39457- 0 0 0 0 0 0 0 0 0 0 0 0
39458- 0 0 0 0 0 0 0 0 0 0 0 0
39459- 0 0 0 0 0 0 0 0 0 0 0 0
39460- 0 0 0 0 0 0 0 0 0 0 0 0
39461- 0 0 0 0 0 0 0 0 0 0 0 0
39462- 0 0 0 0 0 0 0 0 0 0 0 0
39463- 0 0 0 0 0 0 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 0 0 0 0 0 0 0 0 0 0 0 0
39466- 0 0 0 0 0 0 0 0 0 0 0 0
39467- 0 0 0 6 6 6 10 10 10 14 14 14
39468- 22 22 22 26 26 26 30 30 30 34 34 34
39469- 30 30 30 30 30 30 26 26 26 18 18 18
39470- 14 14 14 10 10 10 6 6 6 0 0 0
39471- 0 0 0 0 0 0 0 0 0 0 0 0
39472- 0 0 0 0 0 0 0 0 0 0 0 0
39473- 0 0 0 0 0 0 0 0 0 0 0 0
39474- 0 0 0 0 0 0 0 0 0 0 0 0
39475- 0 0 0 0 0 0 0 0 0 0 0 0
39476- 0 0 0 0 0 0 0 0 0 0 0 0
39477- 0 0 0 0 0 0 0 0 0 0 0 0
39478- 0 0 0 0 0 0 0 0 0 0 0 0
39479- 0 0 0 0 0 0 0 0 0 0 0 0
39480- 0 0 0 0 0 1 0 0 1 0 0 0
39481- 0 0 0 0 0 0 0 0 0 0 0 0
39482- 0 0 0 0 0 0 0 0 0 0 0 0
39483- 0 0 0 0 0 0 0 0 0 0 0 0
39484- 0 0 0 0 0 0 0 0 0 0 0 0
39485- 0 0 0 0 0 0 0 0 0 0 0 0
39486- 0 0 0 0 0 0 0 0 0 0 0 0
39487- 6 6 6 14 14 14 26 26 26 42 42 42
39488- 54 54 54 66 66 66 78 78 78 78 78 78
39489- 78 78 78 74 74 74 66 66 66 54 54 54
39490- 42 42 42 26 26 26 18 18 18 10 10 10
39491- 6 6 6 0 0 0 0 0 0 0 0 0
39492- 0 0 0 0 0 0 0 0 0 0 0 0
39493- 0 0 0 0 0 0 0 0 0 0 0 0
39494- 0 0 0 0 0 0 0 0 0 0 0 0
39495- 0 0 0 0 0 0 0 0 0 0 0 0
39496- 0 0 0 0 0 0 0 0 0 0 0 0
39497- 0 0 0 0 0 0 0 0 0 0 0 0
39498- 0 0 0 0 0 0 0 0 0 0 0 0
39499- 0 0 0 0 0 0 0 0 0 0 0 0
39500- 0 0 1 0 0 0 0 0 0 0 0 0
39501- 0 0 0 0 0 0 0 0 0 0 0 0
39502- 0 0 0 0 0 0 0 0 0 0 0 0
39503- 0 0 0 0 0 0 0 0 0 0 0 0
39504- 0 0 0 0 0 0 0 0 0 0 0 0
39505- 0 0 0 0 0 0 0 0 0 0 0 0
39506- 0 0 0 0 0 0 0 0 0 10 10 10
39507- 22 22 22 42 42 42 66 66 66 86 86 86
39508- 66 66 66 38 38 38 38 38 38 22 22 22
39509- 26 26 26 34 34 34 54 54 54 66 66 66
39510- 86 86 86 70 70 70 46 46 46 26 26 26
39511- 14 14 14 6 6 6 0 0 0 0 0 0
39512- 0 0 0 0 0 0 0 0 0 0 0 0
39513- 0 0 0 0 0 0 0 0 0 0 0 0
39514- 0 0 0 0 0 0 0 0 0 0 0 0
39515- 0 0 0 0 0 0 0 0 0 0 0 0
39516- 0 0 0 0 0 0 0 0 0 0 0 0
39517- 0 0 0 0 0 0 0 0 0 0 0 0
39518- 0 0 0 0 0 0 0 0 0 0 0 0
39519- 0 0 0 0 0 0 0 0 0 0 0 0
39520- 0 0 1 0 0 1 0 0 1 0 0 0
39521- 0 0 0 0 0 0 0 0 0 0 0 0
39522- 0 0 0 0 0 0 0 0 0 0 0 0
39523- 0 0 0 0 0 0 0 0 0 0 0 0
39524- 0 0 0 0 0 0 0 0 0 0 0 0
39525- 0 0 0 0 0 0 0 0 0 0 0 0
39526- 0 0 0 0 0 0 10 10 10 26 26 26
39527- 50 50 50 82 82 82 58 58 58 6 6 6
39528- 2 2 6 2 2 6 2 2 6 2 2 6
39529- 2 2 6 2 2 6 2 2 6 2 2 6
39530- 6 6 6 54 54 54 86 86 86 66 66 66
39531- 38 38 38 18 18 18 6 6 6 0 0 0
39532- 0 0 0 0 0 0 0 0 0 0 0 0
39533- 0 0 0 0 0 0 0 0 0 0 0 0
39534- 0 0 0 0 0 0 0 0 0 0 0 0
39535- 0 0 0 0 0 0 0 0 0 0 0 0
39536- 0 0 0 0 0 0 0 0 0 0 0 0
39537- 0 0 0 0 0 0 0 0 0 0 0 0
39538- 0 0 0 0 0 0 0 0 0 0 0 0
39539- 0 0 0 0 0 0 0 0 0 0 0 0
39540- 0 0 0 0 0 0 0 0 0 0 0 0
39541- 0 0 0 0 0 0 0 0 0 0 0 0
39542- 0 0 0 0 0 0 0 0 0 0 0 0
39543- 0 0 0 0 0 0 0 0 0 0 0 0
39544- 0 0 0 0 0 0 0 0 0 0 0 0
39545- 0 0 0 0 0 0 0 0 0 0 0 0
39546- 0 0 0 6 6 6 22 22 22 50 50 50
39547- 78 78 78 34 34 34 2 2 6 2 2 6
39548- 2 2 6 2 2 6 2 2 6 2 2 6
39549- 2 2 6 2 2 6 2 2 6 2 2 6
39550- 2 2 6 2 2 6 6 6 6 70 70 70
39551- 78 78 78 46 46 46 22 22 22 6 6 6
39552- 0 0 0 0 0 0 0 0 0 0 0 0
39553- 0 0 0 0 0 0 0 0 0 0 0 0
39554- 0 0 0 0 0 0 0 0 0 0 0 0
39555- 0 0 0 0 0 0 0 0 0 0 0 0
39556- 0 0 0 0 0 0 0 0 0 0 0 0
39557- 0 0 0 0 0 0 0 0 0 0 0 0
39558- 0 0 0 0 0 0 0 0 0 0 0 0
39559- 0 0 0 0 0 0 0 0 0 0 0 0
39560- 0 0 1 0 0 1 0 0 1 0 0 0
39561- 0 0 0 0 0 0 0 0 0 0 0 0
39562- 0 0 0 0 0 0 0 0 0 0 0 0
39563- 0 0 0 0 0 0 0 0 0 0 0 0
39564- 0 0 0 0 0 0 0 0 0 0 0 0
39565- 0 0 0 0 0 0 0 0 0 0 0 0
39566- 6 6 6 18 18 18 42 42 42 82 82 82
39567- 26 26 26 2 2 6 2 2 6 2 2 6
39568- 2 2 6 2 2 6 2 2 6 2 2 6
39569- 2 2 6 2 2 6 2 2 6 14 14 14
39570- 46 46 46 34 34 34 6 6 6 2 2 6
39571- 42 42 42 78 78 78 42 42 42 18 18 18
39572- 6 6 6 0 0 0 0 0 0 0 0 0
39573- 0 0 0 0 0 0 0 0 0 0 0 0
39574- 0 0 0 0 0 0 0 0 0 0 0 0
39575- 0 0 0 0 0 0 0 0 0 0 0 0
39576- 0 0 0 0 0 0 0 0 0 0 0 0
39577- 0 0 0 0 0 0 0 0 0 0 0 0
39578- 0 0 0 0 0 0 0 0 0 0 0 0
39579- 0 0 0 0 0 0 0 0 0 0 0 0
39580- 0 0 1 0 0 0 0 0 1 0 0 0
39581- 0 0 0 0 0 0 0 0 0 0 0 0
39582- 0 0 0 0 0 0 0 0 0 0 0 0
39583- 0 0 0 0 0 0 0 0 0 0 0 0
39584- 0 0 0 0 0 0 0 0 0 0 0 0
39585- 0 0 0 0 0 0 0 0 0 0 0 0
39586- 10 10 10 30 30 30 66 66 66 58 58 58
39587- 2 2 6 2 2 6 2 2 6 2 2 6
39588- 2 2 6 2 2 6 2 2 6 2 2 6
39589- 2 2 6 2 2 6 2 2 6 26 26 26
39590- 86 86 86 101 101 101 46 46 46 10 10 10
39591- 2 2 6 58 58 58 70 70 70 34 34 34
39592- 10 10 10 0 0 0 0 0 0 0 0 0
39593- 0 0 0 0 0 0 0 0 0 0 0 0
39594- 0 0 0 0 0 0 0 0 0 0 0 0
39595- 0 0 0 0 0 0 0 0 0 0 0 0
39596- 0 0 0 0 0 0 0 0 0 0 0 0
39597- 0 0 0 0 0 0 0 0 0 0 0 0
39598- 0 0 0 0 0 0 0 0 0 0 0 0
39599- 0 0 0 0 0 0 0 0 0 0 0 0
39600- 0 0 1 0 0 1 0 0 1 0 0 0
39601- 0 0 0 0 0 0 0 0 0 0 0 0
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 14 14 14 42 42 42 86 86 86 10 10 10
39607- 2 2 6 2 2 6 2 2 6 2 2 6
39608- 2 2 6 2 2 6 2 2 6 2 2 6
39609- 2 2 6 2 2 6 2 2 6 30 30 30
39610- 94 94 94 94 94 94 58 58 58 26 26 26
39611- 2 2 6 6 6 6 78 78 78 54 54 54
39612- 22 22 22 6 6 6 0 0 0 0 0 0
39613- 0 0 0 0 0 0 0 0 0 0 0 0
39614- 0 0 0 0 0 0 0 0 0 0 0 0
39615- 0 0 0 0 0 0 0 0 0 0 0 0
39616- 0 0 0 0 0 0 0 0 0 0 0 0
39617- 0 0 0 0 0 0 0 0 0 0 0 0
39618- 0 0 0 0 0 0 0 0 0 0 0 0
39619- 0 0 0 0 0 0 0 0 0 0 0 0
39620- 0 0 0 0 0 0 0 0 0 0 0 0
39621- 0 0 0 0 0 0 0 0 0 0 0 0
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 6 6 6
39626- 22 22 22 62 62 62 62 62 62 2 2 6
39627- 2 2 6 2 2 6 2 2 6 2 2 6
39628- 2 2 6 2 2 6 2 2 6 2 2 6
39629- 2 2 6 2 2 6 2 2 6 26 26 26
39630- 54 54 54 38 38 38 18 18 18 10 10 10
39631- 2 2 6 2 2 6 34 34 34 82 82 82
39632- 38 38 38 14 14 14 0 0 0 0 0 0
39633- 0 0 0 0 0 0 0 0 0 0 0 0
39634- 0 0 0 0 0 0 0 0 0 0 0 0
39635- 0 0 0 0 0 0 0 0 0 0 0 0
39636- 0 0 0 0 0 0 0 0 0 0 0 0
39637- 0 0 0 0 0 0 0 0 0 0 0 0
39638- 0 0 0 0 0 0 0 0 0 0 0 0
39639- 0 0 0 0 0 0 0 0 0 0 0 0
39640- 0 0 0 0 0 1 0 0 1 0 0 0
39641- 0 0 0 0 0 0 0 0 0 0 0 0
39642- 0 0 0 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 6 6 6
39646- 30 30 30 78 78 78 30 30 30 2 2 6
39647- 2 2 6 2 2 6 2 2 6 2 2 6
39648- 2 2 6 2 2 6 2 2 6 2 2 6
39649- 2 2 6 2 2 6 2 2 6 10 10 10
39650- 10 10 10 2 2 6 2 2 6 2 2 6
39651- 2 2 6 2 2 6 2 2 6 78 78 78
39652- 50 50 50 18 18 18 6 6 6 0 0 0
39653- 0 0 0 0 0 0 0 0 0 0 0 0
39654- 0 0 0 0 0 0 0 0 0 0 0 0
39655- 0 0 0 0 0 0 0 0 0 0 0 0
39656- 0 0 0 0 0 0 0 0 0 0 0 0
39657- 0 0 0 0 0 0 0 0 0 0 0 0
39658- 0 0 0 0 0 0 0 0 0 0 0 0
39659- 0 0 0 0 0 0 0 0 0 0 0 0
39660- 0 0 1 0 0 0 0 0 0 0 0 0
39661- 0 0 0 0 0 0 0 0 0 0 0 0
39662- 0 0 0 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 10 10 10
39666- 38 38 38 86 86 86 14 14 14 2 2 6
39667- 2 2 6 2 2 6 2 2 6 2 2 6
39668- 2 2 6 2 2 6 2 2 6 2 2 6
39669- 2 2 6 2 2 6 2 2 6 2 2 6
39670- 2 2 6 2 2 6 2 2 6 2 2 6
39671- 2 2 6 2 2 6 2 2 6 54 54 54
39672- 66 66 66 26 26 26 6 6 6 0 0 0
39673- 0 0 0 0 0 0 0 0 0 0 0 0
39674- 0 0 0 0 0 0 0 0 0 0 0 0
39675- 0 0 0 0 0 0 0 0 0 0 0 0
39676- 0 0 0 0 0 0 0 0 0 0 0 0
39677- 0 0 0 0 0 0 0 0 0 0 0 0
39678- 0 0 0 0 0 0 0 0 0 0 0 0
39679- 0 0 0 0 0 0 0 0 0 0 0 0
39680- 0 0 0 0 0 1 0 0 1 0 0 0
39681- 0 0 0 0 0 0 0 0 0 0 0 0
39682- 0 0 0 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 14 14 14
39686- 42 42 42 82 82 82 2 2 6 2 2 6
39687- 2 2 6 6 6 6 10 10 10 2 2 6
39688- 2 2 6 2 2 6 2 2 6 2 2 6
39689- 2 2 6 2 2 6 2 2 6 6 6 6
39690- 14 14 14 10 10 10 2 2 6 2 2 6
39691- 2 2 6 2 2 6 2 2 6 18 18 18
39692- 82 82 82 34 34 34 10 10 10 0 0 0
39693- 0 0 0 0 0 0 0 0 0 0 0 0
39694- 0 0 0 0 0 0 0 0 0 0 0 0
39695- 0 0 0 0 0 0 0 0 0 0 0 0
39696- 0 0 0 0 0 0 0 0 0 0 0 0
39697- 0 0 0 0 0 0 0 0 0 0 0 0
39698- 0 0 0 0 0 0 0 0 0 0 0 0
39699- 0 0 0 0 0 0 0 0 0 0 0 0
39700- 0 0 1 0 0 0 0 0 0 0 0 0
39701- 0 0 0 0 0 0 0 0 0 0 0 0
39702- 0 0 0 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 14 14 14
39706- 46 46 46 86 86 86 2 2 6 2 2 6
39707- 6 6 6 6 6 6 22 22 22 34 34 34
39708- 6 6 6 2 2 6 2 2 6 2 2 6
39709- 2 2 6 2 2 6 18 18 18 34 34 34
39710- 10 10 10 50 50 50 22 22 22 2 2 6
39711- 2 2 6 2 2 6 2 2 6 10 10 10
39712- 86 86 86 42 42 42 14 14 14 0 0 0
39713- 0 0 0 0 0 0 0 0 0 0 0 0
39714- 0 0 0 0 0 0 0 0 0 0 0 0
39715- 0 0 0 0 0 0 0 0 0 0 0 0
39716- 0 0 0 0 0 0 0 0 0 0 0 0
39717- 0 0 0 0 0 0 0 0 0 0 0 0
39718- 0 0 0 0 0 0 0 0 0 0 0 0
39719- 0 0 0 0 0 0 0 0 0 0 0 0
39720- 0 0 1 0 0 1 0 0 1 0 0 0
39721- 0 0 0 0 0 0 0 0 0 0 0 0
39722- 0 0 0 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 14 14 14
39726- 46 46 46 86 86 86 2 2 6 2 2 6
39727- 38 38 38 116 116 116 94 94 94 22 22 22
39728- 22 22 22 2 2 6 2 2 6 2 2 6
39729- 14 14 14 86 86 86 138 138 138 162 162 162
39730-154 154 154 38 38 38 26 26 26 6 6 6
39731- 2 2 6 2 2 6 2 2 6 2 2 6
39732- 86 86 86 46 46 46 14 14 14 0 0 0
39733- 0 0 0 0 0 0 0 0 0 0 0 0
39734- 0 0 0 0 0 0 0 0 0 0 0 0
39735- 0 0 0 0 0 0 0 0 0 0 0 0
39736- 0 0 0 0 0 0 0 0 0 0 0 0
39737- 0 0 0 0 0 0 0 0 0 0 0 0
39738- 0 0 0 0 0 0 0 0 0 0 0 0
39739- 0 0 0 0 0 0 0 0 0 0 0 0
39740- 0 0 0 0 0 0 0 0 0 0 0 0
39741- 0 0 0 0 0 0 0 0 0 0 0 0
39742- 0 0 0 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 14 14 14
39746- 46 46 46 86 86 86 2 2 6 14 14 14
39747-134 134 134 198 198 198 195 195 195 116 116 116
39748- 10 10 10 2 2 6 2 2 6 6 6 6
39749-101 98 89 187 187 187 210 210 210 218 218 218
39750-214 214 214 134 134 134 14 14 14 6 6 6
39751- 2 2 6 2 2 6 2 2 6 2 2 6
39752- 86 86 86 50 50 50 18 18 18 6 6 6
39753- 0 0 0 0 0 0 0 0 0 0 0 0
39754- 0 0 0 0 0 0 0 0 0 0 0 0
39755- 0 0 0 0 0 0 0 0 0 0 0 0
39756- 0 0 0 0 0 0 0 0 0 0 0 0
39757- 0 0 0 0 0 0 0 0 0 0 0 0
39758- 0 0 0 0 0 0 0 0 0 0 0 0
39759- 0 0 0 0 0 0 0 0 1 0 0 0
39760- 0 0 1 0 0 1 0 0 1 0 0 0
39761- 0 0 0 0 0 0 0 0 0 0 0 0
39762- 0 0 0 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 14 14 14
39766- 46 46 46 86 86 86 2 2 6 54 54 54
39767-218 218 218 195 195 195 226 226 226 246 246 246
39768- 58 58 58 2 2 6 2 2 6 30 30 30
39769-210 210 210 253 253 253 174 174 174 123 123 123
39770-221 221 221 234 234 234 74 74 74 2 2 6
39771- 2 2 6 2 2 6 2 2 6 2 2 6
39772- 70 70 70 58 58 58 22 22 22 6 6 6
39773- 0 0 0 0 0 0 0 0 0 0 0 0
39774- 0 0 0 0 0 0 0 0 0 0 0 0
39775- 0 0 0 0 0 0 0 0 0 0 0 0
39776- 0 0 0 0 0 0 0 0 0 0 0 0
39777- 0 0 0 0 0 0 0 0 0 0 0 0
39778- 0 0 0 0 0 0 0 0 0 0 0 0
39779- 0 0 0 0 0 0 0 0 0 0 0 0
39780- 0 0 0 0 0 0 0 0 0 0 0 0
39781- 0 0 0 0 0 0 0 0 0 0 0 0
39782- 0 0 0 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 14 14 14
39786- 46 46 46 82 82 82 2 2 6 106 106 106
39787-170 170 170 26 26 26 86 86 86 226 226 226
39788-123 123 123 10 10 10 14 14 14 46 46 46
39789-231 231 231 190 190 190 6 6 6 70 70 70
39790- 90 90 90 238 238 238 158 158 158 2 2 6
39791- 2 2 6 2 2 6 2 2 6 2 2 6
39792- 70 70 70 58 58 58 22 22 22 6 6 6
39793- 0 0 0 0 0 0 0 0 0 0 0 0
39794- 0 0 0 0 0 0 0 0 0 0 0 0
39795- 0 0 0 0 0 0 0 0 0 0 0 0
39796- 0 0 0 0 0 0 0 0 0 0 0 0
39797- 0 0 0 0 0 0 0 0 0 0 0 0
39798- 0 0 0 0 0 0 0 0 0 0 0 0
39799- 0 0 0 0 0 0 0 0 1 0 0 0
39800- 0 0 1 0 0 1 0 0 1 0 0 0
39801- 0 0 0 0 0 0 0 0 0 0 0 0
39802- 0 0 0 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 14 14 14
39806- 42 42 42 86 86 86 6 6 6 116 116 116
39807-106 106 106 6 6 6 70 70 70 149 149 149
39808-128 128 128 18 18 18 38 38 38 54 54 54
39809-221 221 221 106 106 106 2 2 6 14 14 14
39810- 46 46 46 190 190 190 198 198 198 2 2 6
39811- 2 2 6 2 2 6 2 2 6 2 2 6
39812- 74 74 74 62 62 62 22 22 22 6 6 6
39813- 0 0 0 0 0 0 0 0 0 0 0 0
39814- 0 0 0 0 0 0 0 0 0 0 0 0
39815- 0 0 0 0 0 0 0 0 0 0 0 0
39816- 0 0 0 0 0 0 0 0 0 0 0 0
39817- 0 0 0 0 0 0 0 0 0 0 0 0
39818- 0 0 0 0 0 0 0 0 0 0 0 0
39819- 0 0 0 0 0 0 0 0 1 0 0 0
39820- 0 0 1 0 0 0 0 0 1 0 0 0
39821- 0 0 0 0 0 0 0 0 0 0 0 0
39822- 0 0 0 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 14 14 14
39826- 42 42 42 94 94 94 14 14 14 101 101 101
39827-128 128 128 2 2 6 18 18 18 116 116 116
39828-118 98 46 121 92 8 121 92 8 98 78 10
39829-162 162 162 106 106 106 2 2 6 2 2 6
39830- 2 2 6 195 195 195 195 195 195 6 6 6
39831- 2 2 6 2 2 6 2 2 6 2 2 6
39832- 74 74 74 62 62 62 22 22 22 6 6 6
39833- 0 0 0 0 0 0 0 0 0 0 0 0
39834- 0 0 0 0 0 0 0 0 0 0 0 0
39835- 0 0 0 0 0 0 0 0 0 0 0 0
39836- 0 0 0 0 0 0 0 0 0 0 0 0
39837- 0 0 0 0 0 0 0 0 0 0 0 0
39838- 0 0 0 0 0 0 0 0 0 0 0 0
39839- 0 0 0 0 0 0 0 0 1 0 0 1
39840- 0 0 1 0 0 0 0 0 1 0 0 0
39841- 0 0 0 0 0 0 0 0 0 0 0 0
39842- 0 0 0 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 10 10 10
39846- 38 38 38 90 90 90 14 14 14 58 58 58
39847-210 210 210 26 26 26 54 38 6 154 114 10
39848-226 170 11 236 186 11 225 175 15 184 144 12
39849-215 174 15 175 146 61 37 26 9 2 2 6
39850- 70 70 70 246 246 246 138 138 138 2 2 6
39851- 2 2 6 2 2 6 2 2 6 2 2 6
39852- 70 70 70 66 66 66 26 26 26 6 6 6
39853- 0 0 0 0 0 0 0 0 0 0 0 0
39854- 0 0 0 0 0 0 0 0 0 0 0 0
39855- 0 0 0 0 0 0 0 0 0 0 0 0
39856- 0 0 0 0 0 0 0 0 0 0 0 0
39857- 0 0 0 0 0 0 0 0 0 0 0 0
39858- 0 0 0 0 0 0 0 0 0 0 0 0
39859- 0 0 0 0 0 0 0 0 0 0 0 0
39860- 0 0 0 0 0 0 0 0 0 0 0 0
39861- 0 0 0 0 0 0 0 0 0 0 0 0
39862- 0 0 0 0 0 0 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 10 10 10
39866- 38 38 38 86 86 86 14 14 14 10 10 10
39867-195 195 195 188 164 115 192 133 9 225 175 15
39868-239 182 13 234 190 10 232 195 16 232 200 30
39869-245 207 45 241 208 19 232 195 16 184 144 12
39870-218 194 134 211 206 186 42 42 42 2 2 6
39871- 2 2 6 2 2 6 2 2 6 2 2 6
39872- 50 50 50 74 74 74 30 30 30 6 6 6
39873- 0 0 0 0 0 0 0 0 0 0 0 0
39874- 0 0 0 0 0 0 0 0 0 0 0 0
39875- 0 0 0 0 0 0 0 0 0 0 0 0
39876- 0 0 0 0 0 0 0 0 0 0 0 0
39877- 0 0 0 0 0 0 0 0 0 0 0 0
39878- 0 0 0 0 0 0 0 0 0 0 0 0
39879- 0 0 0 0 0 0 0 0 0 0 0 0
39880- 0 0 0 0 0 0 0 0 0 0 0 0
39881- 0 0 0 0 0 0 0 0 0 0 0 0
39882- 0 0 0 0 0 0 0 0 0 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 10 10 10
39886- 34 34 34 86 86 86 14 14 14 2 2 6
39887-121 87 25 192 133 9 219 162 10 239 182 13
39888-236 186 11 232 195 16 241 208 19 244 214 54
39889-246 218 60 246 218 38 246 215 20 241 208 19
39890-241 208 19 226 184 13 121 87 25 2 2 6
39891- 2 2 6 2 2 6 2 2 6 2 2 6
39892- 50 50 50 82 82 82 34 34 34 10 10 10
39893- 0 0 0 0 0 0 0 0 0 0 0 0
39894- 0 0 0 0 0 0 0 0 0 0 0 0
39895- 0 0 0 0 0 0 0 0 0 0 0 0
39896- 0 0 0 0 0 0 0 0 0 0 0 0
39897- 0 0 0 0 0 0 0 0 0 0 0 0
39898- 0 0 0 0 0 0 0 0 0 0 0 0
39899- 0 0 0 0 0 0 0 0 0 0 0 0
39900- 0 0 0 0 0 0 0 0 0 0 0 0
39901- 0 0 0 0 0 0 0 0 0 0 0 0
39902- 0 0 0 0 0 0 0 0 0 0 0 0
39903- 0 0 0 0 0 0 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 0 0 0 0 0 0 0 0 0 10 10 10
39906- 34 34 34 82 82 82 30 30 30 61 42 6
39907-180 123 7 206 145 10 230 174 11 239 182 13
39908-234 190 10 238 202 15 241 208 19 246 218 74
39909-246 218 38 246 215 20 246 215 20 246 215 20
39910-226 184 13 215 174 15 184 144 12 6 6 6
39911- 2 2 6 2 2 6 2 2 6 2 2 6
39912- 26 26 26 94 94 94 42 42 42 14 14 14
39913- 0 0 0 0 0 0 0 0 0 0 0 0
39914- 0 0 0 0 0 0 0 0 0 0 0 0
39915- 0 0 0 0 0 0 0 0 0 0 0 0
39916- 0 0 0 0 0 0 0 0 0 0 0 0
39917- 0 0 0 0 0 0 0 0 0 0 0 0
39918- 0 0 0 0 0 0 0 0 0 0 0 0
39919- 0 0 0 0 0 0 0 0 0 0 0 0
39920- 0 0 0 0 0 0 0 0 0 0 0 0
39921- 0 0 0 0 0 0 0 0 0 0 0 0
39922- 0 0 0 0 0 0 0 0 0 0 0 0
39923- 0 0 0 0 0 0 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 0 0 0
39925- 0 0 0 0 0 0 0 0 0 10 10 10
39926- 30 30 30 78 78 78 50 50 50 104 69 6
39927-192 133 9 216 158 10 236 178 12 236 186 11
39928-232 195 16 241 208 19 244 214 54 245 215 43
39929-246 215 20 246 215 20 241 208 19 198 155 10
39930-200 144 11 216 158 10 156 118 10 2 2 6
39931- 2 2 6 2 2 6 2 2 6 2 2 6
39932- 6 6 6 90 90 90 54 54 54 18 18 18
39933- 6 6 6 0 0 0 0 0 0 0 0 0
39934- 0 0 0 0 0 0 0 0 0 0 0 0
39935- 0 0 0 0 0 0 0 0 0 0 0 0
39936- 0 0 0 0 0 0 0 0 0 0 0 0
39937- 0 0 0 0 0 0 0 0 0 0 0 0
39938- 0 0 0 0 0 0 0 0 0 0 0 0
39939- 0 0 0 0 0 0 0 0 0 0 0 0
39940- 0 0 0 0 0 0 0 0 0 0 0 0
39941- 0 0 0 0 0 0 0 0 0 0 0 0
39942- 0 0 0 0 0 0 0 0 0 0 0 0
39943- 0 0 0 0 0 0 0 0 0 0 0 0
39944- 0 0 0 0 0 0 0 0 0 0 0 0
39945- 0 0 0 0 0 0 0 0 0 10 10 10
39946- 30 30 30 78 78 78 46 46 46 22 22 22
39947-137 92 6 210 162 10 239 182 13 238 190 10
39948-238 202 15 241 208 19 246 215 20 246 215 20
39949-241 208 19 203 166 17 185 133 11 210 150 10
39950-216 158 10 210 150 10 102 78 10 2 2 6
39951- 6 6 6 54 54 54 14 14 14 2 2 6
39952- 2 2 6 62 62 62 74 74 74 30 30 30
39953- 10 10 10 0 0 0 0 0 0 0 0 0
39954- 0 0 0 0 0 0 0 0 0 0 0 0
39955- 0 0 0 0 0 0 0 0 0 0 0 0
39956- 0 0 0 0 0 0 0 0 0 0 0 0
39957- 0 0 0 0 0 0 0 0 0 0 0 0
39958- 0 0 0 0 0 0 0 0 0 0 0 0
39959- 0 0 0 0 0 0 0 0 0 0 0 0
39960- 0 0 0 0 0 0 0 0 0 0 0 0
39961- 0 0 0 0 0 0 0 0 0 0 0 0
39962- 0 0 0 0 0 0 0 0 0 0 0 0
39963- 0 0 0 0 0 0 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 0 0 0
39965- 0 0 0 0 0 0 0 0 0 10 10 10
39966- 34 34 34 78 78 78 50 50 50 6 6 6
39967- 94 70 30 139 102 15 190 146 13 226 184 13
39968-232 200 30 232 195 16 215 174 15 190 146 13
39969-168 122 10 192 133 9 210 150 10 213 154 11
39970-202 150 34 182 157 106 101 98 89 2 2 6
39971- 2 2 6 78 78 78 116 116 116 58 58 58
39972- 2 2 6 22 22 22 90 90 90 46 46 46
39973- 18 18 18 6 6 6 0 0 0 0 0 0
39974- 0 0 0 0 0 0 0 0 0 0 0 0
39975- 0 0 0 0 0 0 0 0 0 0 0 0
39976- 0 0 0 0 0 0 0 0 0 0 0 0
39977- 0 0 0 0 0 0 0 0 0 0 0 0
39978- 0 0 0 0 0 0 0 0 0 0 0 0
39979- 0 0 0 0 0 0 0 0 0 0 0 0
39980- 0 0 0 0 0 0 0 0 0 0 0 0
39981- 0 0 0 0 0 0 0 0 0 0 0 0
39982- 0 0 0 0 0 0 0 0 0 0 0 0
39983- 0 0 0 0 0 0 0 0 0 0 0 0
39984- 0 0 0 0 0 0 0 0 0 0 0 0
39985- 0 0 0 0 0 0 0 0 0 10 10 10
39986- 38 38 38 86 86 86 50 50 50 6 6 6
39987-128 128 128 174 154 114 156 107 11 168 122 10
39988-198 155 10 184 144 12 197 138 11 200 144 11
39989-206 145 10 206 145 10 197 138 11 188 164 115
39990-195 195 195 198 198 198 174 174 174 14 14 14
39991- 2 2 6 22 22 22 116 116 116 116 116 116
39992- 22 22 22 2 2 6 74 74 74 70 70 70
39993- 30 30 30 10 10 10 0 0 0 0 0 0
39994- 0 0 0 0 0 0 0 0 0 0 0 0
39995- 0 0 0 0 0 0 0 0 0 0 0 0
39996- 0 0 0 0 0 0 0 0 0 0 0 0
39997- 0 0 0 0 0 0 0 0 0 0 0 0
39998- 0 0 0 0 0 0 0 0 0 0 0 0
39999- 0 0 0 0 0 0 0 0 0 0 0 0
40000- 0 0 0 0 0 0 0 0 0 0 0 0
40001- 0 0 0 0 0 0 0 0 0 0 0 0
40002- 0 0 0 0 0 0 0 0 0 0 0 0
40003- 0 0 0 0 0 0 0 0 0 0 0 0
40004- 0 0 0 0 0 0 0 0 0 0 0 0
40005- 0 0 0 0 0 0 6 6 6 18 18 18
40006- 50 50 50 101 101 101 26 26 26 10 10 10
40007-138 138 138 190 190 190 174 154 114 156 107 11
40008-197 138 11 200 144 11 197 138 11 192 133 9
40009-180 123 7 190 142 34 190 178 144 187 187 187
40010-202 202 202 221 221 221 214 214 214 66 66 66
40011- 2 2 6 2 2 6 50 50 50 62 62 62
40012- 6 6 6 2 2 6 10 10 10 90 90 90
40013- 50 50 50 18 18 18 6 6 6 0 0 0
40014- 0 0 0 0 0 0 0 0 0 0 0 0
40015- 0 0 0 0 0 0 0 0 0 0 0 0
40016- 0 0 0 0 0 0 0 0 0 0 0 0
40017- 0 0 0 0 0 0 0 0 0 0 0 0
40018- 0 0 0 0 0 0 0 0 0 0 0 0
40019- 0 0 0 0 0 0 0 0 0 0 0 0
40020- 0 0 0 0 0 0 0 0 0 0 0 0
40021- 0 0 0 0 0 0 0 0 0 0 0 0
40022- 0 0 0 0 0 0 0 0 0 0 0 0
40023- 0 0 0 0 0 0 0 0 0 0 0 0
40024- 0 0 0 0 0 0 0 0 0 0 0 0
40025- 0 0 0 0 0 0 10 10 10 34 34 34
40026- 74 74 74 74 74 74 2 2 6 6 6 6
40027-144 144 144 198 198 198 190 190 190 178 166 146
40028-154 121 60 156 107 11 156 107 11 168 124 44
40029-174 154 114 187 187 187 190 190 190 210 210 210
40030-246 246 246 253 253 253 253 253 253 182 182 182
40031- 6 6 6 2 2 6 2 2 6 2 2 6
40032- 2 2 6 2 2 6 2 2 6 62 62 62
40033- 74 74 74 34 34 34 14 14 14 0 0 0
40034- 0 0 0 0 0 0 0 0 0 0 0 0
40035- 0 0 0 0 0 0 0 0 0 0 0 0
40036- 0 0 0 0 0 0 0 0 0 0 0 0
40037- 0 0 0 0 0 0 0 0 0 0 0 0
40038- 0 0 0 0 0 0 0 0 0 0 0 0
40039- 0 0 0 0 0 0 0 0 0 0 0 0
40040- 0 0 0 0 0 0 0 0 0 0 0 0
40041- 0 0 0 0 0 0 0 0 0 0 0 0
40042- 0 0 0 0 0 0 0 0 0 0 0 0
40043- 0 0 0 0 0 0 0 0 0 0 0 0
40044- 0 0 0 0 0 0 0 0 0 0 0 0
40045- 0 0 0 10 10 10 22 22 22 54 54 54
40046- 94 94 94 18 18 18 2 2 6 46 46 46
40047-234 234 234 221 221 221 190 190 190 190 190 190
40048-190 190 190 187 187 187 187 187 187 190 190 190
40049-190 190 190 195 195 195 214 214 214 242 242 242
40050-253 253 253 253 253 253 253 253 253 253 253 253
40051- 82 82 82 2 2 6 2 2 6 2 2 6
40052- 2 2 6 2 2 6 2 2 6 14 14 14
40053- 86 86 86 54 54 54 22 22 22 6 6 6
40054- 0 0 0 0 0 0 0 0 0 0 0 0
40055- 0 0 0 0 0 0 0 0 0 0 0 0
40056- 0 0 0 0 0 0 0 0 0 0 0 0
40057- 0 0 0 0 0 0 0 0 0 0 0 0
40058- 0 0 0 0 0 0 0 0 0 0 0 0
40059- 0 0 0 0 0 0 0 0 0 0 0 0
40060- 0 0 0 0 0 0 0 0 0 0 0 0
40061- 0 0 0 0 0 0 0 0 0 0 0 0
40062- 0 0 0 0 0 0 0 0 0 0 0 0
40063- 0 0 0 0 0 0 0 0 0 0 0 0
40064- 0 0 0 0 0 0 0 0 0 0 0 0
40065- 6 6 6 18 18 18 46 46 46 90 90 90
40066- 46 46 46 18 18 18 6 6 6 182 182 182
40067-253 253 253 246 246 246 206 206 206 190 190 190
40068-190 190 190 190 190 190 190 190 190 190 190 190
40069-206 206 206 231 231 231 250 250 250 253 253 253
40070-253 253 253 253 253 253 253 253 253 253 253 253
40071-202 202 202 14 14 14 2 2 6 2 2 6
40072- 2 2 6 2 2 6 2 2 6 2 2 6
40073- 42 42 42 86 86 86 42 42 42 18 18 18
40074- 6 6 6 0 0 0 0 0 0 0 0 0
40075- 0 0 0 0 0 0 0 0 0 0 0 0
40076- 0 0 0 0 0 0 0 0 0 0 0 0
40077- 0 0 0 0 0 0 0 0 0 0 0 0
40078- 0 0 0 0 0 0 0 0 0 0 0 0
40079- 0 0 0 0 0 0 0 0 0 0 0 0
40080- 0 0 0 0 0 0 0 0 0 0 0 0
40081- 0 0 0 0 0 0 0 0 0 0 0 0
40082- 0 0 0 0 0 0 0 0 0 0 0 0
40083- 0 0 0 0 0 0 0 0 0 0 0 0
40084- 0 0 0 0 0 0 0 0 0 6 6 6
40085- 14 14 14 38 38 38 74 74 74 66 66 66
40086- 2 2 6 6 6 6 90 90 90 250 250 250
40087-253 253 253 253 253 253 238 238 238 198 198 198
40088-190 190 190 190 190 190 195 195 195 221 221 221
40089-246 246 246 253 253 253 253 253 253 253 253 253
40090-253 253 253 253 253 253 253 253 253 253 253 253
40091-253 253 253 82 82 82 2 2 6 2 2 6
40092- 2 2 6 2 2 6 2 2 6 2 2 6
40093- 2 2 6 78 78 78 70 70 70 34 34 34
40094- 14 14 14 6 6 6 0 0 0 0 0 0
40095- 0 0 0 0 0 0 0 0 0 0 0 0
40096- 0 0 0 0 0 0 0 0 0 0 0 0
40097- 0 0 0 0 0 0 0 0 0 0 0 0
40098- 0 0 0 0 0 0 0 0 0 0 0 0
40099- 0 0 0 0 0 0 0 0 0 0 0 0
40100- 0 0 0 0 0 0 0 0 0 0 0 0
40101- 0 0 0 0 0 0 0 0 0 0 0 0
40102- 0 0 0 0 0 0 0 0 0 0 0 0
40103- 0 0 0 0 0 0 0 0 0 0 0 0
40104- 0 0 0 0 0 0 0 0 0 14 14 14
40105- 34 34 34 66 66 66 78 78 78 6 6 6
40106- 2 2 6 18 18 18 218 218 218 253 253 253
40107-253 253 253 253 253 253 253 253 253 246 246 246
40108-226 226 226 231 231 231 246 246 246 253 253 253
40109-253 253 253 253 253 253 253 253 253 253 253 253
40110-253 253 253 253 253 253 253 253 253 253 253 253
40111-253 253 253 178 178 178 2 2 6 2 2 6
40112- 2 2 6 2 2 6 2 2 6 2 2 6
40113- 2 2 6 18 18 18 90 90 90 62 62 62
40114- 30 30 30 10 10 10 0 0 0 0 0 0
40115- 0 0 0 0 0 0 0 0 0 0 0 0
40116- 0 0 0 0 0 0 0 0 0 0 0 0
40117- 0 0 0 0 0 0 0 0 0 0 0 0
40118- 0 0 0 0 0 0 0 0 0 0 0 0
40119- 0 0 0 0 0 0 0 0 0 0 0 0
40120- 0 0 0 0 0 0 0 0 0 0 0 0
40121- 0 0 0 0 0 0 0 0 0 0 0 0
40122- 0 0 0 0 0 0 0 0 0 0 0 0
40123- 0 0 0 0 0 0 0 0 0 0 0 0
40124- 0 0 0 0 0 0 10 10 10 26 26 26
40125- 58 58 58 90 90 90 18 18 18 2 2 6
40126- 2 2 6 110 110 110 253 253 253 253 253 253
40127-253 253 253 253 253 253 253 253 253 253 253 253
40128-250 250 250 253 253 253 253 253 253 253 253 253
40129-253 253 253 253 253 253 253 253 253 253 253 253
40130-253 253 253 253 253 253 253 253 253 253 253 253
40131-253 253 253 231 231 231 18 18 18 2 2 6
40132- 2 2 6 2 2 6 2 2 6 2 2 6
40133- 2 2 6 2 2 6 18 18 18 94 94 94
40134- 54 54 54 26 26 26 10 10 10 0 0 0
40135- 0 0 0 0 0 0 0 0 0 0 0 0
40136- 0 0 0 0 0 0 0 0 0 0 0 0
40137- 0 0 0 0 0 0 0 0 0 0 0 0
40138- 0 0 0 0 0 0 0 0 0 0 0 0
40139- 0 0 0 0 0 0 0 0 0 0 0 0
40140- 0 0 0 0 0 0 0 0 0 0 0 0
40141- 0 0 0 0 0 0 0 0 0 0 0 0
40142- 0 0 0 0 0 0 0 0 0 0 0 0
40143- 0 0 0 0 0 0 0 0 0 0 0 0
40144- 0 0 0 6 6 6 22 22 22 50 50 50
40145- 90 90 90 26 26 26 2 2 6 2 2 6
40146- 14 14 14 195 195 195 250 250 250 253 253 253
40147-253 253 253 253 253 253 253 253 253 253 253 253
40148-253 253 253 253 253 253 253 253 253 253 253 253
40149-253 253 253 253 253 253 253 253 253 253 253 253
40150-253 253 253 253 253 253 253 253 253 253 253 253
40151-250 250 250 242 242 242 54 54 54 2 2 6
40152- 2 2 6 2 2 6 2 2 6 2 2 6
40153- 2 2 6 2 2 6 2 2 6 38 38 38
40154- 86 86 86 50 50 50 22 22 22 6 6 6
40155- 0 0 0 0 0 0 0 0 0 0 0 0
40156- 0 0 0 0 0 0 0 0 0 0 0 0
40157- 0 0 0 0 0 0 0 0 0 0 0 0
40158- 0 0 0 0 0 0 0 0 0 0 0 0
40159- 0 0 0 0 0 0 0 0 0 0 0 0
40160- 0 0 0 0 0 0 0 0 0 0 0 0
40161- 0 0 0 0 0 0 0 0 0 0 0 0
40162- 0 0 0 0 0 0 0 0 0 0 0 0
40163- 0 0 0 0 0 0 0 0 0 0 0 0
40164- 6 6 6 14 14 14 38 38 38 82 82 82
40165- 34 34 34 2 2 6 2 2 6 2 2 6
40166- 42 42 42 195 195 195 246 246 246 253 253 253
40167-253 253 253 253 253 253 253 253 253 250 250 250
40168-242 242 242 242 242 242 250 250 250 253 253 253
40169-253 253 253 253 253 253 253 253 253 253 253 253
40170-253 253 253 250 250 250 246 246 246 238 238 238
40171-226 226 226 231 231 231 101 101 101 6 6 6
40172- 2 2 6 2 2 6 2 2 6 2 2 6
40173- 2 2 6 2 2 6 2 2 6 2 2 6
40174- 38 38 38 82 82 82 42 42 42 14 14 14
40175- 6 6 6 0 0 0 0 0 0 0 0 0
40176- 0 0 0 0 0 0 0 0 0 0 0 0
40177- 0 0 0 0 0 0 0 0 0 0 0 0
40178- 0 0 0 0 0 0 0 0 0 0 0 0
40179- 0 0 0 0 0 0 0 0 0 0 0 0
40180- 0 0 0 0 0 0 0 0 0 0 0 0
40181- 0 0 0 0 0 0 0 0 0 0 0 0
40182- 0 0 0 0 0 0 0 0 0 0 0 0
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 10 10 10 26 26 26 62 62 62 66 66 66
40185- 2 2 6 2 2 6 2 2 6 6 6 6
40186- 70 70 70 170 170 170 206 206 206 234 234 234
40187-246 246 246 250 250 250 250 250 250 238 238 238
40188-226 226 226 231 231 231 238 238 238 250 250 250
40189-250 250 250 250 250 250 246 246 246 231 231 231
40190-214 214 214 206 206 206 202 202 202 202 202 202
40191-198 198 198 202 202 202 182 182 182 18 18 18
40192- 2 2 6 2 2 6 2 2 6 2 2 6
40193- 2 2 6 2 2 6 2 2 6 2 2 6
40194- 2 2 6 62 62 62 66 66 66 30 30 30
40195- 10 10 10 0 0 0 0 0 0 0 0 0
40196- 0 0 0 0 0 0 0 0 0 0 0 0
40197- 0 0 0 0 0 0 0 0 0 0 0 0
40198- 0 0 0 0 0 0 0 0 0 0 0 0
40199- 0 0 0 0 0 0 0 0 0 0 0 0
40200- 0 0 0 0 0 0 0 0 0 0 0 0
40201- 0 0 0 0 0 0 0 0 0 0 0 0
40202- 0 0 0 0 0 0 0 0 0 0 0 0
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 14 14 14 42 42 42 82 82 82 18 18 18
40205- 2 2 6 2 2 6 2 2 6 10 10 10
40206- 94 94 94 182 182 182 218 218 218 242 242 242
40207-250 250 250 253 253 253 253 253 253 250 250 250
40208-234 234 234 253 253 253 253 253 253 253 253 253
40209-253 253 253 253 253 253 253 253 253 246 246 246
40210-238 238 238 226 226 226 210 210 210 202 202 202
40211-195 195 195 195 195 195 210 210 210 158 158 158
40212- 6 6 6 14 14 14 50 50 50 14 14 14
40213- 2 2 6 2 2 6 2 2 6 2 2 6
40214- 2 2 6 6 6 6 86 86 86 46 46 46
40215- 18 18 18 6 6 6 0 0 0 0 0 0
40216- 0 0 0 0 0 0 0 0 0 0 0 0
40217- 0 0 0 0 0 0 0 0 0 0 0 0
40218- 0 0 0 0 0 0 0 0 0 0 0 0
40219- 0 0 0 0 0 0 0 0 0 0 0 0
40220- 0 0 0 0 0 0 0 0 0 0 0 0
40221- 0 0 0 0 0 0 0 0 0 0 0 0
40222- 0 0 0 0 0 0 0 0 0 0 0 0
40223- 0 0 0 0 0 0 0 0 0 6 6 6
40224- 22 22 22 54 54 54 70 70 70 2 2 6
40225- 2 2 6 10 10 10 2 2 6 22 22 22
40226-166 166 166 231 231 231 250 250 250 253 253 253
40227-253 253 253 253 253 253 253 253 253 250 250 250
40228-242 242 242 253 253 253 253 253 253 253 253 253
40229-253 253 253 253 253 253 253 253 253 253 253 253
40230-253 253 253 253 253 253 253 253 253 246 246 246
40231-231 231 231 206 206 206 198 198 198 226 226 226
40232- 94 94 94 2 2 6 6 6 6 38 38 38
40233- 30 30 30 2 2 6 2 2 6 2 2 6
40234- 2 2 6 2 2 6 62 62 62 66 66 66
40235- 26 26 26 10 10 10 0 0 0 0 0 0
40236- 0 0 0 0 0 0 0 0 0 0 0 0
40237- 0 0 0 0 0 0 0 0 0 0 0 0
40238- 0 0 0 0 0 0 0 0 0 0 0 0
40239- 0 0 0 0 0 0 0 0 0 0 0 0
40240- 0 0 0 0 0 0 0 0 0 0 0 0
40241- 0 0 0 0 0 0 0 0 0 0 0 0
40242- 0 0 0 0 0 0 0 0 0 0 0 0
40243- 0 0 0 0 0 0 0 0 0 10 10 10
40244- 30 30 30 74 74 74 50 50 50 2 2 6
40245- 26 26 26 26 26 26 2 2 6 106 106 106
40246-238 238 238 253 253 253 253 253 253 253 253 253
40247-253 253 253 253 253 253 253 253 253 253 253 253
40248-253 253 253 253 253 253 253 253 253 253 253 253
40249-253 253 253 253 253 253 253 253 253 253 253 253
40250-253 253 253 253 253 253 253 253 253 253 253 253
40251-253 253 253 246 246 246 218 218 218 202 202 202
40252-210 210 210 14 14 14 2 2 6 2 2 6
40253- 30 30 30 22 22 22 2 2 6 2 2 6
40254- 2 2 6 2 2 6 18 18 18 86 86 86
40255- 42 42 42 14 14 14 0 0 0 0 0 0
40256- 0 0 0 0 0 0 0 0 0 0 0 0
40257- 0 0 0 0 0 0 0 0 0 0 0 0
40258- 0 0 0 0 0 0 0 0 0 0 0 0
40259- 0 0 0 0 0 0 0 0 0 0 0 0
40260- 0 0 0 0 0 0 0 0 0 0 0 0
40261- 0 0 0 0 0 0 0 0 0 0 0 0
40262- 0 0 0 0 0 0 0 0 0 0 0 0
40263- 0 0 0 0 0 0 0 0 0 14 14 14
40264- 42 42 42 90 90 90 22 22 22 2 2 6
40265- 42 42 42 2 2 6 18 18 18 218 218 218
40266-253 253 253 253 253 253 253 253 253 253 253 253
40267-253 253 253 253 253 253 253 253 253 253 253 253
40268-253 253 253 253 253 253 253 253 253 253 253 253
40269-253 253 253 253 253 253 253 253 253 253 253 253
40270-253 253 253 253 253 253 253 253 253 253 253 253
40271-253 253 253 253 253 253 250 250 250 221 221 221
40272-218 218 218 101 101 101 2 2 6 14 14 14
40273- 18 18 18 38 38 38 10 10 10 2 2 6
40274- 2 2 6 2 2 6 2 2 6 78 78 78
40275- 58 58 58 22 22 22 6 6 6 0 0 0
40276- 0 0 0 0 0 0 0 0 0 0 0 0
40277- 0 0 0 0 0 0 0 0 0 0 0 0
40278- 0 0 0 0 0 0 0 0 0 0 0 0
40279- 0 0 0 0 0 0 0 0 0 0 0 0
40280- 0 0 0 0 0 0 0 0 0 0 0 0
40281- 0 0 0 0 0 0 0 0 0 0 0 0
40282- 0 0 0 0 0 0 0 0 0 0 0 0
40283- 0 0 0 0 0 0 6 6 6 18 18 18
40284- 54 54 54 82 82 82 2 2 6 26 26 26
40285- 22 22 22 2 2 6 123 123 123 253 253 253
40286-253 253 253 253 253 253 253 253 253 253 253 253
40287-253 253 253 253 253 253 253 253 253 253 253 253
40288-253 253 253 253 253 253 253 253 253 253 253 253
40289-253 253 253 253 253 253 253 253 253 253 253 253
40290-253 253 253 253 253 253 253 253 253 253 253 253
40291-253 253 253 253 253 253 253 253 253 250 250 250
40292-238 238 238 198 198 198 6 6 6 38 38 38
40293- 58 58 58 26 26 26 38 38 38 2 2 6
40294- 2 2 6 2 2 6 2 2 6 46 46 46
40295- 78 78 78 30 30 30 10 10 10 0 0 0
40296- 0 0 0 0 0 0 0 0 0 0 0 0
40297- 0 0 0 0 0 0 0 0 0 0 0 0
40298- 0 0 0 0 0 0 0 0 0 0 0 0
40299- 0 0 0 0 0 0 0 0 0 0 0 0
40300- 0 0 0 0 0 0 0 0 0 0 0 0
40301- 0 0 0 0 0 0 0 0 0 0 0 0
40302- 0 0 0 0 0 0 0 0 0 0 0 0
40303- 0 0 0 0 0 0 10 10 10 30 30 30
40304- 74 74 74 58 58 58 2 2 6 42 42 42
40305- 2 2 6 22 22 22 231 231 231 253 253 253
40306-253 253 253 253 253 253 253 253 253 253 253 253
40307-253 253 253 253 253 253 253 253 253 250 250 250
40308-253 253 253 253 253 253 253 253 253 253 253 253
40309-253 253 253 253 253 253 253 253 253 253 253 253
40310-253 253 253 253 253 253 253 253 253 253 253 253
40311-253 253 253 253 253 253 253 253 253 253 253 253
40312-253 253 253 246 246 246 46 46 46 38 38 38
40313- 42 42 42 14 14 14 38 38 38 14 14 14
40314- 2 2 6 2 2 6 2 2 6 6 6 6
40315- 86 86 86 46 46 46 14 14 14 0 0 0
40316- 0 0 0 0 0 0 0 0 0 0 0 0
40317- 0 0 0 0 0 0 0 0 0 0 0 0
40318- 0 0 0 0 0 0 0 0 0 0 0 0
40319- 0 0 0 0 0 0 0 0 0 0 0 0
40320- 0 0 0 0 0 0 0 0 0 0 0 0
40321- 0 0 0 0 0 0 0 0 0 0 0 0
40322- 0 0 0 0 0 0 0 0 0 0 0 0
40323- 0 0 0 6 6 6 14 14 14 42 42 42
40324- 90 90 90 18 18 18 18 18 18 26 26 26
40325- 2 2 6 116 116 116 253 253 253 253 253 253
40326-253 253 253 253 253 253 253 253 253 253 253 253
40327-253 253 253 253 253 253 250 250 250 238 238 238
40328-253 253 253 253 253 253 253 253 253 253 253 253
40329-253 253 253 253 253 253 253 253 253 253 253 253
40330-253 253 253 253 253 253 253 253 253 253 253 253
40331-253 253 253 253 253 253 253 253 253 253 253 253
40332-253 253 253 253 253 253 94 94 94 6 6 6
40333- 2 2 6 2 2 6 10 10 10 34 34 34
40334- 2 2 6 2 2 6 2 2 6 2 2 6
40335- 74 74 74 58 58 58 22 22 22 6 6 6
40336- 0 0 0 0 0 0 0 0 0 0 0 0
40337- 0 0 0 0 0 0 0 0 0 0 0 0
40338- 0 0 0 0 0 0 0 0 0 0 0 0
40339- 0 0 0 0 0 0 0 0 0 0 0 0
40340- 0 0 0 0 0 0 0 0 0 0 0 0
40341- 0 0 0 0 0 0 0 0 0 0 0 0
40342- 0 0 0 0 0 0 0 0 0 0 0 0
40343- 0 0 0 10 10 10 26 26 26 66 66 66
40344- 82 82 82 2 2 6 38 38 38 6 6 6
40345- 14 14 14 210 210 210 253 253 253 253 253 253
40346-253 253 253 253 253 253 253 253 253 253 253 253
40347-253 253 253 253 253 253 246 246 246 242 242 242
40348-253 253 253 253 253 253 253 253 253 253 253 253
40349-253 253 253 253 253 253 253 253 253 253 253 253
40350-253 253 253 253 253 253 253 253 253 253 253 253
40351-253 253 253 253 253 253 253 253 253 253 253 253
40352-253 253 253 253 253 253 144 144 144 2 2 6
40353- 2 2 6 2 2 6 2 2 6 46 46 46
40354- 2 2 6 2 2 6 2 2 6 2 2 6
40355- 42 42 42 74 74 74 30 30 30 10 10 10
40356- 0 0 0 0 0 0 0 0 0 0 0 0
40357- 0 0 0 0 0 0 0 0 0 0 0 0
40358- 0 0 0 0 0 0 0 0 0 0 0 0
40359- 0 0 0 0 0 0 0 0 0 0 0 0
40360- 0 0 0 0 0 0 0 0 0 0 0 0
40361- 0 0 0 0 0 0 0 0 0 0 0 0
40362- 0 0 0 0 0 0 0 0 0 0 0 0
40363- 6 6 6 14 14 14 42 42 42 90 90 90
40364- 26 26 26 6 6 6 42 42 42 2 2 6
40365- 74 74 74 250 250 250 253 253 253 253 253 253
40366-253 253 253 253 253 253 253 253 253 253 253 253
40367-253 253 253 253 253 253 242 242 242 242 242 242
40368-253 253 253 253 253 253 253 253 253 253 253 253
40369-253 253 253 253 253 253 253 253 253 253 253 253
40370-253 253 253 253 253 253 253 253 253 253 253 253
40371-253 253 253 253 253 253 253 253 253 253 253 253
40372-253 253 253 253 253 253 182 182 182 2 2 6
40373- 2 2 6 2 2 6 2 2 6 46 46 46
40374- 2 2 6 2 2 6 2 2 6 2 2 6
40375- 10 10 10 86 86 86 38 38 38 10 10 10
40376- 0 0 0 0 0 0 0 0 0 0 0 0
40377- 0 0 0 0 0 0 0 0 0 0 0 0
40378- 0 0 0 0 0 0 0 0 0 0 0 0
40379- 0 0 0 0 0 0 0 0 0 0 0 0
40380- 0 0 0 0 0 0 0 0 0 0 0 0
40381- 0 0 0 0 0 0 0 0 0 0 0 0
40382- 0 0 0 0 0 0 0 0 0 0 0 0
40383- 10 10 10 26 26 26 66 66 66 82 82 82
40384- 2 2 6 22 22 22 18 18 18 2 2 6
40385-149 149 149 253 253 253 253 253 253 253 253 253
40386-253 253 253 253 253 253 253 253 253 253 253 253
40387-253 253 253 253 253 253 234 234 234 242 242 242
40388-253 253 253 253 253 253 253 253 253 253 253 253
40389-253 253 253 253 253 253 253 253 253 253 253 253
40390-253 253 253 253 253 253 253 253 253 253 253 253
40391-253 253 253 253 253 253 253 253 253 253 253 253
40392-253 253 253 253 253 253 206 206 206 2 2 6
40393- 2 2 6 2 2 6 2 2 6 38 38 38
40394- 2 2 6 2 2 6 2 2 6 2 2 6
40395- 6 6 6 86 86 86 46 46 46 14 14 14
40396- 0 0 0 0 0 0 0 0 0 0 0 0
40397- 0 0 0 0 0 0 0 0 0 0 0 0
40398- 0 0 0 0 0 0 0 0 0 0 0 0
40399- 0 0 0 0 0 0 0 0 0 0 0 0
40400- 0 0 0 0 0 0 0 0 0 0 0 0
40401- 0 0 0 0 0 0 0 0 0 0 0 0
40402- 0 0 0 0 0 0 0 0 0 6 6 6
40403- 18 18 18 46 46 46 86 86 86 18 18 18
40404- 2 2 6 34 34 34 10 10 10 6 6 6
40405-210 210 210 253 253 253 253 253 253 253 253 253
40406-253 253 253 253 253 253 253 253 253 253 253 253
40407-253 253 253 253 253 253 234 234 234 242 242 242
40408-253 253 253 253 253 253 253 253 253 253 253 253
40409-253 253 253 253 253 253 253 253 253 253 253 253
40410-253 253 253 253 253 253 253 253 253 253 253 253
40411-253 253 253 253 253 253 253 253 253 253 253 253
40412-253 253 253 253 253 253 221 221 221 6 6 6
40413- 2 2 6 2 2 6 6 6 6 30 30 30
40414- 2 2 6 2 2 6 2 2 6 2 2 6
40415- 2 2 6 82 82 82 54 54 54 18 18 18
40416- 6 6 6 0 0 0 0 0 0 0 0 0
40417- 0 0 0 0 0 0 0 0 0 0 0 0
40418- 0 0 0 0 0 0 0 0 0 0 0 0
40419- 0 0 0 0 0 0 0 0 0 0 0 0
40420- 0 0 0 0 0 0 0 0 0 0 0 0
40421- 0 0 0 0 0 0 0 0 0 0 0 0
40422- 0 0 0 0 0 0 0 0 0 10 10 10
40423- 26 26 26 66 66 66 62 62 62 2 2 6
40424- 2 2 6 38 38 38 10 10 10 26 26 26
40425-238 238 238 253 253 253 253 253 253 253 253 253
40426-253 253 253 253 253 253 253 253 253 253 253 253
40427-253 253 253 253 253 253 231 231 231 238 238 238
40428-253 253 253 253 253 253 253 253 253 253 253 253
40429-253 253 253 253 253 253 253 253 253 253 253 253
40430-253 253 253 253 253 253 253 253 253 253 253 253
40431-253 253 253 253 253 253 253 253 253 253 253 253
40432-253 253 253 253 253 253 231 231 231 6 6 6
40433- 2 2 6 2 2 6 10 10 10 30 30 30
40434- 2 2 6 2 2 6 2 2 6 2 2 6
40435- 2 2 6 66 66 66 58 58 58 22 22 22
40436- 6 6 6 0 0 0 0 0 0 0 0 0
40437- 0 0 0 0 0 0 0 0 0 0 0 0
40438- 0 0 0 0 0 0 0 0 0 0 0 0
40439- 0 0 0 0 0 0 0 0 0 0 0 0
40440- 0 0 0 0 0 0 0 0 0 0 0 0
40441- 0 0 0 0 0 0 0 0 0 0 0 0
40442- 0 0 0 0 0 0 0 0 0 10 10 10
40443- 38 38 38 78 78 78 6 6 6 2 2 6
40444- 2 2 6 46 46 46 14 14 14 42 42 42
40445-246 246 246 253 253 253 253 253 253 253 253 253
40446-253 253 253 253 253 253 253 253 253 253 253 253
40447-253 253 253 253 253 253 231 231 231 242 242 242
40448-253 253 253 253 253 253 253 253 253 253 253 253
40449-253 253 253 253 253 253 253 253 253 253 253 253
40450-253 253 253 253 253 253 253 253 253 253 253 253
40451-253 253 253 253 253 253 253 253 253 253 253 253
40452-253 253 253 253 253 253 234 234 234 10 10 10
40453- 2 2 6 2 2 6 22 22 22 14 14 14
40454- 2 2 6 2 2 6 2 2 6 2 2 6
40455- 2 2 6 66 66 66 62 62 62 22 22 22
40456- 6 6 6 0 0 0 0 0 0 0 0 0
40457- 0 0 0 0 0 0 0 0 0 0 0 0
40458- 0 0 0 0 0 0 0 0 0 0 0 0
40459- 0 0 0 0 0 0 0 0 0 0 0 0
40460- 0 0 0 0 0 0 0 0 0 0 0 0
40461- 0 0 0 0 0 0 0 0 0 0 0 0
40462- 0 0 0 0 0 0 6 6 6 18 18 18
40463- 50 50 50 74 74 74 2 2 6 2 2 6
40464- 14 14 14 70 70 70 34 34 34 62 62 62
40465-250 250 250 253 253 253 253 253 253 253 253 253
40466-253 253 253 253 253 253 253 253 253 253 253 253
40467-253 253 253 253 253 253 231 231 231 246 246 246
40468-253 253 253 253 253 253 253 253 253 253 253 253
40469-253 253 253 253 253 253 253 253 253 253 253 253
40470-253 253 253 253 253 253 253 253 253 253 253 253
40471-253 253 253 253 253 253 253 253 253 253 253 253
40472-253 253 253 253 253 253 234 234 234 14 14 14
40473- 2 2 6 2 2 6 30 30 30 2 2 6
40474- 2 2 6 2 2 6 2 2 6 2 2 6
40475- 2 2 6 66 66 66 62 62 62 22 22 22
40476- 6 6 6 0 0 0 0 0 0 0 0 0
40477- 0 0 0 0 0 0 0 0 0 0 0 0
40478- 0 0 0 0 0 0 0 0 0 0 0 0
40479- 0 0 0 0 0 0 0 0 0 0 0 0
40480- 0 0 0 0 0 0 0 0 0 0 0 0
40481- 0 0 0 0 0 0 0 0 0 0 0 0
40482- 0 0 0 0 0 0 6 6 6 18 18 18
40483- 54 54 54 62 62 62 2 2 6 2 2 6
40484- 2 2 6 30 30 30 46 46 46 70 70 70
40485-250 250 250 253 253 253 253 253 253 253 253 253
40486-253 253 253 253 253 253 253 253 253 253 253 253
40487-253 253 253 253 253 253 231 231 231 246 246 246
40488-253 253 253 253 253 253 253 253 253 253 253 253
40489-253 253 253 253 253 253 253 253 253 253 253 253
40490-253 253 253 253 253 253 253 253 253 253 253 253
40491-253 253 253 253 253 253 253 253 253 253 253 253
40492-253 253 253 253 253 253 226 226 226 10 10 10
40493- 2 2 6 6 6 6 30 30 30 2 2 6
40494- 2 2 6 2 2 6 2 2 6 2 2 6
40495- 2 2 6 66 66 66 58 58 58 22 22 22
40496- 6 6 6 0 0 0 0 0 0 0 0 0
40497- 0 0 0 0 0 0 0 0 0 0 0 0
40498- 0 0 0 0 0 0 0 0 0 0 0 0
40499- 0 0 0 0 0 0 0 0 0 0 0 0
40500- 0 0 0 0 0 0 0 0 0 0 0 0
40501- 0 0 0 0 0 0 0 0 0 0 0 0
40502- 0 0 0 0 0 0 6 6 6 22 22 22
40503- 58 58 58 62 62 62 2 2 6 2 2 6
40504- 2 2 6 2 2 6 30 30 30 78 78 78
40505-250 250 250 253 253 253 253 253 253 253 253 253
40506-253 253 253 253 253 253 253 253 253 253 253 253
40507-253 253 253 253 253 253 231 231 231 246 246 246
40508-253 253 253 253 253 253 253 253 253 253 253 253
40509-253 253 253 253 253 253 253 253 253 253 253 253
40510-253 253 253 253 253 253 253 253 253 253 253 253
40511-253 253 253 253 253 253 253 253 253 253 253 253
40512-253 253 253 253 253 253 206 206 206 2 2 6
40513- 22 22 22 34 34 34 18 14 6 22 22 22
40514- 26 26 26 18 18 18 6 6 6 2 2 6
40515- 2 2 6 82 82 82 54 54 54 18 18 18
40516- 6 6 6 0 0 0 0 0 0 0 0 0
40517- 0 0 0 0 0 0 0 0 0 0 0 0
40518- 0 0 0 0 0 0 0 0 0 0 0 0
40519- 0 0 0 0 0 0 0 0 0 0 0 0
40520- 0 0 0 0 0 0 0 0 0 0 0 0
40521- 0 0 0 0 0 0 0 0 0 0 0 0
40522- 0 0 0 0 0 0 6 6 6 26 26 26
40523- 62 62 62 106 106 106 74 54 14 185 133 11
40524-210 162 10 121 92 8 6 6 6 62 62 62
40525-238 238 238 253 253 253 253 253 253 253 253 253
40526-253 253 253 253 253 253 253 253 253 253 253 253
40527-253 253 253 253 253 253 231 231 231 246 246 246
40528-253 253 253 253 253 253 253 253 253 253 253 253
40529-253 253 253 253 253 253 253 253 253 253 253 253
40530-253 253 253 253 253 253 253 253 253 253 253 253
40531-253 253 253 253 253 253 253 253 253 253 253 253
40532-253 253 253 253 253 253 158 158 158 18 18 18
40533- 14 14 14 2 2 6 2 2 6 2 2 6
40534- 6 6 6 18 18 18 66 66 66 38 38 38
40535- 6 6 6 94 94 94 50 50 50 18 18 18
40536- 6 6 6 0 0 0 0 0 0 0 0 0
40537- 0 0 0 0 0 0 0 0 0 0 0 0
40538- 0 0 0 0 0 0 0 0 0 0 0 0
40539- 0 0 0 0 0 0 0 0 0 0 0 0
40540- 0 0 0 0 0 0 0 0 0 0 0 0
40541- 0 0 0 0 0 0 0 0 0 6 6 6
40542- 10 10 10 10 10 10 18 18 18 38 38 38
40543- 78 78 78 142 134 106 216 158 10 242 186 14
40544-246 190 14 246 190 14 156 118 10 10 10 10
40545- 90 90 90 238 238 238 253 253 253 253 253 253
40546-253 253 253 253 253 253 253 253 253 253 253 253
40547-253 253 253 253 253 253 231 231 231 250 250 250
40548-253 253 253 253 253 253 253 253 253 253 253 253
40549-253 253 253 253 253 253 253 253 253 253 253 253
40550-253 253 253 253 253 253 253 253 253 253 253 253
40551-253 253 253 253 253 253 253 253 253 246 230 190
40552-238 204 91 238 204 91 181 142 44 37 26 9
40553- 2 2 6 2 2 6 2 2 6 2 2 6
40554- 2 2 6 2 2 6 38 38 38 46 46 46
40555- 26 26 26 106 106 106 54 54 54 18 18 18
40556- 6 6 6 0 0 0 0 0 0 0 0 0
40557- 0 0 0 0 0 0 0 0 0 0 0 0
40558- 0 0 0 0 0 0 0 0 0 0 0 0
40559- 0 0 0 0 0 0 0 0 0 0 0 0
40560- 0 0 0 0 0 0 0 0 0 0 0 0
40561- 0 0 0 6 6 6 14 14 14 22 22 22
40562- 30 30 30 38 38 38 50 50 50 70 70 70
40563-106 106 106 190 142 34 226 170 11 242 186 14
40564-246 190 14 246 190 14 246 190 14 154 114 10
40565- 6 6 6 74 74 74 226 226 226 253 253 253
40566-253 253 253 253 253 253 253 253 253 253 253 253
40567-253 253 253 253 253 253 231 231 231 250 250 250
40568-253 253 253 253 253 253 253 253 253 253 253 253
40569-253 253 253 253 253 253 253 253 253 253 253 253
40570-253 253 253 253 253 253 253 253 253 253 253 253
40571-253 253 253 253 253 253 253 253 253 228 184 62
40572-241 196 14 241 208 19 232 195 16 38 30 10
40573- 2 2 6 2 2 6 2 2 6 2 2 6
40574- 2 2 6 6 6 6 30 30 30 26 26 26
40575-203 166 17 154 142 90 66 66 66 26 26 26
40576- 6 6 6 0 0 0 0 0 0 0 0 0
40577- 0 0 0 0 0 0 0 0 0 0 0 0
40578- 0 0 0 0 0 0 0 0 0 0 0 0
40579- 0 0 0 0 0 0 0 0 0 0 0 0
40580- 0 0 0 0 0 0 0 0 0 0 0 0
40581- 6 6 6 18 18 18 38 38 38 58 58 58
40582- 78 78 78 86 86 86 101 101 101 123 123 123
40583-175 146 61 210 150 10 234 174 13 246 186 14
40584-246 190 14 246 190 14 246 190 14 238 190 10
40585-102 78 10 2 2 6 46 46 46 198 198 198
40586-253 253 253 253 253 253 253 253 253 253 253 253
40587-253 253 253 253 253 253 234 234 234 242 242 242
40588-253 253 253 253 253 253 253 253 253 253 253 253
40589-253 253 253 253 253 253 253 253 253 253 253 253
40590-253 253 253 253 253 253 253 253 253 253 253 253
40591-253 253 253 253 253 253 253 253 253 224 178 62
40592-242 186 14 241 196 14 210 166 10 22 18 6
40593- 2 2 6 2 2 6 2 2 6 2 2 6
40594- 2 2 6 2 2 6 6 6 6 121 92 8
40595-238 202 15 232 195 16 82 82 82 34 34 34
40596- 10 10 10 0 0 0 0 0 0 0 0 0
40597- 0 0 0 0 0 0 0 0 0 0 0 0
40598- 0 0 0 0 0 0 0 0 0 0 0 0
40599- 0 0 0 0 0 0 0 0 0 0 0 0
40600- 0 0 0 0 0 0 0 0 0 0 0 0
40601- 14 14 14 38 38 38 70 70 70 154 122 46
40602-190 142 34 200 144 11 197 138 11 197 138 11
40603-213 154 11 226 170 11 242 186 14 246 190 14
40604-246 190 14 246 190 14 246 190 14 246 190 14
40605-225 175 15 46 32 6 2 2 6 22 22 22
40606-158 158 158 250 250 250 253 253 253 253 253 253
40607-253 253 253 253 253 253 253 253 253 253 253 253
40608-253 253 253 253 253 253 253 253 253 253 253 253
40609-253 253 253 253 253 253 253 253 253 253 253 253
40610-253 253 253 253 253 253 253 253 253 253 253 253
40611-253 253 253 250 250 250 242 242 242 224 178 62
40612-239 182 13 236 186 11 213 154 11 46 32 6
40613- 2 2 6 2 2 6 2 2 6 2 2 6
40614- 2 2 6 2 2 6 61 42 6 225 175 15
40615-238 190 10 236 186 11 112 100 78 42 42 42
40616- 14 14 14 0 0 0 0 0 0 0 0 0
40617- 0 0 0 0 0 0 0 0 0 0 0 0
40618- 0 0 0 0 0 0 0 0 0 0 0 0
40619- 0 0 0 0 0 0 0 0 0 0 0 0
40620- 0 0 0 0 0 0 0 0 0 6 6 6
40621- 22 22 22 54 54 54 154 122 46 213 154 11
40622-226 170 11 230 174 11 226 170 11 226 170 11
40623-236 178 12 242 186 14 246 190 14 246 190 14
40624-246 190 14 246 190 14 246 190 14 246 190 14
40625-241 196 14 184 144 12 10 10 10 2 2 6
40626- 6 6 6 116 116 116 242 242 242 253 253 253
40627-253 253 253 253 253 253 253 253 253 253 253 253
40628-253 253 253 253 253 253 253 253 253 253 253 253
40629-253 253 253 253 253 253 253 253 253 253 253 253
40630-253 253 253 253 253 253 253 253 253 253 253 253
40631-253 253 253 231 231 231 198 198 198 214 170 54
40632-236 178 12 236 178 12 210 150 10 137 92 6
40633- 18 14 6 2 2 6 2 2 6 2 2 6
40634- 6 6 6 70 47 6 200 144 11 236 178 12
40635-239 182 13 239 182 13 124 112 88 58 58 58
40636- 22 22 22 6 6 6 0 0 0 0 0 0
40637- 0 0 0 0 0 0 0 0 0 0 0 0
40638- 0 0 0 0 0 0 0 0 0 0 0 0
40639- 0 0 0 0 0 0 0 0 0 0 0 0
40640- 0 0 0 0 0 0 0 0 0 10 10 10
40641- 30 30 30 70 70 70 180 133 36 226 170 11
40642-239 182 13 242 186 14 242 186 14 246 186 14
40643-246 190 14 246 190 14 246 190 14 246 190 14
40644-246 190 14 246 190 14 246 190 14 246 190 14
40645-246 190 14 232 195 16 98 70 6 2 2 6
40646- 2 2 6 2 2 6 66 66 66 221 221 221
40647-253 253 253 253 253 253 253 253 253 253 253 253
40648-253 253 253 253 253 253 253 253 253 253 253 253
40649-253 253 253 253 253 253 253 253 253 253 253 253
40650-253 253 253 253 253 253 253 253 253 253 253 253
40651-253 253 253 206 206 206 198 198 198 214 166 58
40652-230 174 11 230 174 11 216 158 10 192 133 9
40653-163 110 8 116 81 8 102 78 10 116 81 8
40654-167 114 7 197 138 11 226 170 11 239 182 13
40655-242 186 14 242 186 14 162 146 94 78 78 78
40656- 34 34 34 14 14 14 6 6 6 0 0 0
40657- 0 0 0 0 0 0 0 0 0 0 0 0
40658- 0 0 0 0 0 0 0 0 0 0 0 0
40659- 0 0 0 0 0 0 0 0 0 0 0 0
40660- 0 0 0 0 0 0 0 0 0 6 6 6
40661- 30 30 30 78 78 78 190 142 34 226 170 11
40662-239 182 13 246 190 14 246 190 14 246 190 14
40663-246 190 14 246 190 14 246 190 14 246 190 14
40664-246 190 14 246 190 14 246 190 14 246 190 14
40665-246 190 14 241 196 14 203 166 17 22 18 6
40666- 2 2 6 2 2 6 2 2 6 38 38 38
40667-218 218 218 253 253 253 253 253 253 253 253 253
40668-253 253 253 253 253 253 253 253 253 253 253 253
40669-253 253 253 253 253 253 253 253 253 253 253 253
40670-253 253 253 253 253 253 253 253 253 253 253 253
40671-250 250 250 206 206 206 198 198 198 202 162 69
40672-226 170 11 236 178 12 224 166 10 210 150 10
40673-200 144 11 197 138 11 192 133 9 197 138 11
40674-210 150 10 226 170 11 242 186 14 246 190 14
40675-246 190 14 246 186 14 225 175 15 124 112 88
40676- 62 62 62 30 30 30 14 14 14 6 6 6
40677- 0 0 0 0 0 0 0 0 0 0 0 0
40678- 0 0 0 0 0 0 0 0 0 0 0 0
40679- 0 0 0 0 0 0 0 0 0 0 0 0
40680- 0 0 0 0 0 0 0 0 0 10 10 10
40681- 30 30 30 78 78 78 174 135 50 224 166 10
40682-239 182 13 246 190 14 246 190 14 246 190 14
40683-246 190 14 246 190 14 246 190 14 246 190 14
40684-246 190 14 246 190 14 246 190 14 246 190 14
40685-246 190 14 246 190 14 241 196 14 139 102 15
40686- 2 2 6 2 2 6 2 2 6 2 2 6
40687- 78 78 78 250 250 250 253 253 253 253 253 253
40688-253 253 253 253 253 253 253 253 253 253 253 253
40689-253 253 253 253 253 253 253 253 253 253 253 253
40690-253 253 253 253 253 253 253 253 253 253 253 253
40691-250 250 250 214 214 214 198 198 198 190 150 46
40692-219 162 10 236 178 12 234 174 13 224 166 10
40693-216 158 10 213 154 11 213 154 11 216 158 10
40694-226 170 11 239 182 13 246 190 14 246 190 14
40695-246 190 14 246 190 14 242 186 14 206 162 42
40696-101 101 101 58 58 58 30 30 30 14 14 14
40697- 6 6 6 0 0 0 0 0 0 0 0 0
40698- 0 0 0 0 0 0 0 0 0 0 0 0
40699- 0 0 0 0 0 0 0 0 0 0 0 0
40700- 0 0 0 0 0 0 0 0 0 10 10 10
40701- 30 30 30 74 74 74 174 135 50 216 158 10
40702-236 178 12 246 190 14 246 190 14 246 190 14
40703-246 190 14 246 190 14 246 190 14 246 190 14
40704-246 190 14 246 190 14 246 190 14 246 190 14
40705-246 190 14 246 190 14 241 196 14 226 184 13
40706- 61 42 6 2 2 6 2 2 6 2 2 6
40707- 22 22 22 238 238 238 253 253 253 253 253 253
40708-253 253 253 253 253 253 253 253 253 253 253 253
40709-253 253 253 253 253 253 253 253 253 253 253 253
40710-253 253 253 253 253 253 253 253 253 253 253 253
40711-253 253 253 226 226 226 187 187 187 180 133 36
40712-216 158 10 236 178 12 239 182 13 236 178 12
40713-230 174 11 226 170 11 226 170 11 230 174 11
40714-236 178 12 242 186 14 246 190 14 246 190 14
40715-246 190 14 246 190 14 246 186 14 239 182 13
40716-206 162 42 106 106 106 66 66 66 34 34 34
40717- 14 14 14 6 6 6 0 0 0 0 0 0
40718- 0 0 0 0 0 0 0 0 0 0 0 0
40719- 0 0 0 0 0 0 0 0 0 0 0 0
40720- 0 0 0 0 0 0 0 0 0 6 6 6
40721- 26 26 26 70 70 70 163 133 67 213 154 11
40722-236 178 12 246 190 14 246 190 14 246 190 14
40723-246 190 14 246 190 14 246 190 14 246 190 14
40724-246 190 14 246 190 14 246 190 14 246 190 14
40725-246 190 14 246 190 14 246 190 14 241 196 14
40726-190 146 13 18 14 6 2 2 6 2 2 6
40727- 46 46 46 246 246 246 253 253 253 253 253 253
40728-253 253 253 253 253 253 253 253 253 253 253 253
40729-253 253 253 253 253 253 253 253 253 253 253 253
40730-253 253 253 253 253 253 253 253 253 253 253 253
40731-253 253 253 221 221 221 86 86 86 156 107 11
40732-216 158 10 236 178 12 242 186 14 246 186 14
40733-242 186 14 239 182 13 239 182 13 242 186 14
40734-242 186 14 246 186 14 246 190 14 246 190 14
40735-246 190 14 246 190 14 246 190 14 246 190 14
40736-242 186 14 225 175 15 142 122 72 66 66 66
40737- 30 30 30 10 10 10 0 0 0 0 0 0
40738- 0 0 0 0 0 0 0 0 0 0 0 0
40739- 0 0 0 0 0 0 0 0 0 0 0 0
40740- 0 0 0 0 0 0 0 0 0 6 6 6
40741- 26 26 26 70 70 70 163 133 67 210 150 10
40742-236 178 12 246 190 14 246 190 14 246 190 14
40743-246 190 14 246 190 14 246 190 14 246 190 14
40744-246 190 14 246 190 14 246 190 14 246 190 14
40745-246 190 14 246 190 14 246 190 14 246 190 14
40746-232 195 16 121 92 8 34 34 34 106 106 106
40747-221 221 221 253 253 253 253 253 253 253 253 253
40748-253 253 253 253 253 253 253 253 253 253 253 253
40749-253 253 253 253 253 253 253 253 253 253 253 253
40750-253 253 253 253 253 253 253 253 253 253 253 253
40751-242 242 242 82 82 82 18 14 6 163 110 8
40752-216 158 10 236 178 12 242 186 14 246 190 14
40753-246 190 14 246 190 14 246 190 14 246 190 14
40754-246 190 14 246 190 14 246 190 14 246 190 14
40755-246 190 14 246 190 14 246 190 14 246 190 14
40756-246 190 14 246 190 14 242 186 14 163 133 67
40757- 46 46 46 18 18 18 6 6 6 0 0 0
40758- 0 0 0 0 0 0 0 0 0 0 0 0
40759- 0 0 0 0 0 0 0 0 0 0 0 0
40760- 0 0 0 0 0 0 0 0 0 10 10 10
40761- 30 30 30 78 78 78 163 133 67 210 150 10
40762-236 178 12 246 186 14 246 190 14 246 190 14
40763-246 190 14 246 190 14 246 190 14 246 190 14
40764-246 190 14 246 190 14 246 190 14 246 190 14
40765-246 190 14 246 190 14 246 190 14 246 190 14
40766-241 196 14 215 174 15 190 178 144 253 253 253
40767-253 253 253 253 253 253 253 253 253 253 253 253
40768-253 253 253 253 253 253 253 253 253 253 253 253
40769-253 253 253 253 253 253 253 253 253 253 253 253
40770-253 253 253 253 253 253 253 253 253 218 218 218
40771- 58 58 58 2 2 6 22 18 6 167 114 7
40772-216 158 10 236 178 12 246 186 14 246 190 14
40773-246 190 14 246 190 14 246 190 14 246 190 14
40774-246 190 14 246 190 14 246 190 14 246 190 14
40775-246 190 14 246 190 14 246 190 14 246 190 14
40776-246 190 14 246 186 14 242 186 14 190 150 46
40777- 54 54 54 22 22 22 6 6 6 0 0 0
40778- 0 0 0 0 0 0 0 0 0 0 0 0
40779- 0 0 0 0 0 0 0 0 0 0 0 0
40780- 0 0 0 0 0 0 0 0 0 14 14 14
40781- 38 38 38 86 86 86 180 133 36 213 154 11
40782-236 178 12 246 186 14 246 190 14 246 190 14
40783-246 190 14 246 190 14 246 190 14 246 190 14
40784-246 190 14 246 190 14 246 190 14 246 190 14
40785-246 190 14 246 190 14 246 190 14 246 190 14
40786-246 190 14 232 195 16 190 146 13 214 214 214
40787-253 253 253 253 253 253 253 253 253 253 253 253
40788-253 253 253 253 253 253 253 253 253 253 253 253
40789-253 253 253 253 253 253 253 253 253 253 253 253
40790-253 253 253 250 250 250 170 170 170 26 26 26
40791- 2 2 6 2 2 6 37 26 9 163 110 8
40792-219 162 10 239 182 13 246 186 14 246 190 14
40793-246 190 14 246 190 14 246 190 14 246 190 14
40794-246 190 14 246 190 14 246 190 14 246 190 14
40795-246 190 14 246 190 14 246 190 14 246 190 14
40796-246 186 14 236 178 12 224 166 10 142 122 72
40797- 46 46 46 18 18 18 6 6 6 0 0 0
40798- 0 0 0 0 0 0 0 0 0 0 0 0
40799- 0 0 0 0 0 0 0 0 0 0 0 0
40800- 0 0 0 0 0 0 6 6 6 18 18 18
40801- 50 50 50 109 106 95 192 133 9 224 166 10
40802-242 186 14 246 190 14 246 190 14 246 190 14
40803-246 190 14 246 190 14 246 190 14 246 190 14
40804-246 190 14 246 190 14 246 190 14 246 190 14
40805-246 190 14 246 190 14 246 190 14 246 190 14
40806-242 186 14 226 184 13 210 162 10 142 110 46
40807-226 226 226 253 253 253 253 253 253 253 253 253
40808-253 253 253 253 253 253 253 253 253 253 253 253
40809-253 253 253 253 253 253 253 253 253 253 253 253
40810-198 198 198 66 66 66 2 2 6 2 2 6
40811- 2 2 6 2 2 6 50 34 6 156 107 11
40812-219 162 10 239 182 13 246 186 14 246 190 14
40813-246 190 14 246 190 14 246 190 14 246 190 14
40814-246 190 14 246 190 14 246 190 14 246 190 14
40815-246 190 14 246 190 14 246 190 14 242 186 14
40816-234 174 13 213 154 11 154 122 46 66 66 66
40817- 30 30 30 10 10 10 0 0 0 0 0 0
40818- 0 0 0 0 0 0 0 0 0 0 0 0
40819- 0 0 0 0 0 0 0 0 0 0 0 0
40820- 0 0 0 0 0 0 6 6 6 22 22 22
40821- 58 58 58 154 121 60 206 145 10 234 174 13
40822-242 186 14 246 186 14 246 190 14 246 190 14
40823-246 190 14 246 190 14 246 190 14 246 190 14
40824-246 190 14 246 190 14 246 190 14 246 190 14
40825-246 190 14 246 190 14 246 190 14 246 190 14
40826-246 186 14 236 178 12 210 162 10 163 110 8
40827- 61 42 6 138 138 138 218 218 218 250 250 250
40828-253 253 253 253 253 253 253 253 253 250 250 250
40829-242 242 242 210 210 210 144 144 144 66 66 66
40830- 6 6 6 2 2 6 2 2 6 2 2 6
40831- 2 2 6 2 2 6 61 42 6 163 110 8
40832-216 158 10 236 178 12 246 190 14 246 190 14
40833-246 190 14 246 190 14 246 190 14 246 190 14
40834-246 190 14 246 190 14 246 190 14 246 190 14
40835-246 190 14 239 182 13 230 174 11 216 158 10
40836-190 142 34 124 112 88 70 70 70 38 38 38
40837- 18 18 18 6 6 6 0 0 0 0 0 0
40838- 0 0 0 0 0 0 0 0 0 0 0 0
40839- 0 0 0 0 0 0 0 0 0 0 0 0
40840- 0 0 0 0 0 0 6 6 6 22 22 22
40841- 62 62 62 168 124 44 206 145 10 224 166 10
40842-236 178 12 239 182 13 242 186 14 242 186 14
40843-246 186 14 246 190 14 246 190 14 246 190 14
40844-246 190 14 246 190 14 246 190 14 246 190 14
40845-246 190 14 246 190 14 246 190 14 246 190 14
40846-246 190 14 236 178 12 216 158 10 175 118 6
40847- 80 54 7 2 2 6 6 6 6 30 30 30
40848- 54 54 54 62 62 62 50 50 50 38 38 38
40849- 14 14 14 2 2 6 2 2 6 2 2 6
40850- 2 2 6 2 2 6 2 2 6 2 2 6
40851- 2 2 6 6 6 6 80 54 7 167 114 7
40852-213 154 11 236 178 12 246 190 14 246 190 14
40853-246 190 14 246 190 14 246 190 14 246 190 14
40854-246 190 14 242 186 14 239 182 13 239 182 13
40855-230 174 11 210 150 10 174 135 50 124 112 88
40856- 82 82 82 54 54 54 34 34 34 18 18 18
40857- 6 6 6 0 0 0 0 0 0 0 0 0
40858- 0 0 0 0 0 0 0 0 0 0 0 0
40859- 0 0 0 0 0 0 0 0 0 0 0 0
40860- 0 0 0 0 0 0 6 6 6 18 18 18
40861- 50 50 50 158 118 36 192 133 9 200 144 11
40862-216 158 10 219 162 10 224 166 10 226 170 11
40863-230 174 11 236 178 12 239 182 13 239 182 13
40864-242 186 14 246 186 14 246 190 14 246 190 14
40865-246 190 14 246 190 14 246 190 14 246 190 14
40866-246 186 14 230 174 11 210 150 10 163 110 8
40867-104 69 6 10 10 10 2 2 6 2 2 6
40868- 2 2 6 2 2 6 2 2 6 2 2 6
40869- 2 2 6 2 2 6 2 2 6 2 2 6
40870- 2 2 6 2 2 6 2 2 6 2 2 6
40871- 2 2 6 6 6 6 91 60 6 167 114 7
40872-206 145 10 230 174 11 242 186 14 246 190 14
40873-246 190 14 246 190 14 246 186 14 242 186 14
40874-239 182 13 230 174 11 224 166 10 213 154 11
40875-180 133 36 124 112 88 86 86 86 58 58 58
40876- 38 38 38 22 22 22 10 10 10 6 6 6
40877- 0 0 0 0 0 0 0 0 0 0 0 0
40878- 0 0 0 0 0 0 0 0 0 0 0 0
40879- 0 0 0 0 0 0 0 0 0 0 0 0
40880- 0 0 0 0 0 0 0 0 0 14 14 14
40881- 34 34 34 70 70 70 138 110 50 158 118 36
40882-167 114 7 180 123 7 192 133 9 197 138 11
40883-200 144 11 206 145 10 213 154 11 219 162 10
40884-224 166 10 230 174 11 239 182 13 242 186 14
40885-246 186 14 246 186 14 246 186 14 246 186 14
40886-239 182 13 216 158 10 185 133 11 152 99 6
40887-104 69 6 18 14 6 2 2 6 2 2 6
40888- 2 2 6 2 2 6 2 2 6 2 2 6
40889- 2 2 6 2 2 6 2 2 6 2 2 6
40890- 2 2 6 2 2 6 2 2 6 2 2 6
40891- 2 2 6 6 6 6 80 54 7 152 99 6
40892-192 133 9 219 162 10 236 178 12 239 182 13
40893-246 186 14 242 186 14 239 182 13 236 178 12
40894-224 166 10 206 145 10 192 133 9 154 121 60
40895- 94 94 94 62 62 62 42 42 42 22 22 22
40896- 14 14 14 6 6 6 0 0 0 0 0 0
40897- 0 0 0 0 0 0 0 0 0 0 0 0
40898- 0 0 0 0 0 0 0 0 0 0 0 0
40899- 0 0 0 0 0 0 0 0 0 0 0 0
40900- 0 0 0 0 0 0 0 0 0 6 6 6
40901- 18 18 18 34 34 34 58 58 58 78 78 78
40902-101 98 89 124 112 88 142 110 46 156 107 11
40903-163 110 8 167 114 7 175 118 6 180 123 7
40904-185 133 11 197 138 11 210 150 10 219 162 10
40905-226 170 11 236 178 12 236 178 12 234 174 13
40906-219 162 10 197 138 11 163 110 8 130 83 6
40907- 91 60 6 10 10 10 2 2 6 2 2 6
40908- 18 18 18 38 38 38 38 38 38 38 38 38
40909- 38 38 38 38 38 38 38 38 38 38 38 38
40910- 38 38 38 38 38 38 26 26 26 2 2 6
40911- 2 2 6 6 6 6 70 47 6 137 92 6
40912-175 118 6 200 144 11 219 162 10 230 174 11
40913-234 174 13 230 174 11 219 162 10 210 150 10
40914-192 133 9 163 110 8 124 112 88 82 82 82
40915- 50 50 50 30 30 30 14 14 14 6 6 6
40916- 0 0 0 0 0 0 0 0 0 0 0 0
40917- 0 0 0 0 0 0 0 0 0 0 0 0
40918- 0 0 0 0 0 0 0 0 0 0 0 0
40919- 0 0 0 0 0 0 0 0 0 0 0 0
40920- 0 0 0 0 0 0 0 0 0 0 0 0
40921- 6 6 6 14 14 14 22 22 22 34 34 34
40922- 42 42 42 58 58 58 74 74 74 86 86 86
40923-101 98 89 122 102 70 130 98 46 121 87 25
40924-137 92 6 152 99 6 163 110 8 180 123 7
40925-185 133 11 197 138 11 206 145 10 200 144 11
40926-180 123 7 156 107 11 130 83 6 104 69 6
40927- 50 34 6 54 54 54 110 110 110 101 98 89
40928- 86 86 86 82 82 82 78 78 78 78 78 78
40929- 78 78 78 78 78 78 78 78 78 78 78 78
40930- 78 78 78 82 82 82 86 86 86 94 94 94
40931-106 106 106 101 101 101 86 66 34 124 80 6
40932-156 107 11 180 123 7 192 133 9 200 144 11
40933-206 145 10 200 144 11 192 133 9 175 118 6
40934-139 102 15 109 106 95 70 70 70 42 42 42
40935- 22 22 22 10 10 10 0 0 0 0 0 0
40936- 0 0 0 0 0 0 0 0 0 0 0 0
40937- 0 0 0 0 0 0 0 0 0 0 0 0
40938- 0 0 0 0 0 0 0 0 0 0 0 0
40939- 0 0 0 0 0 0 0 0 0 0 0 0
40940- 0 0 0 0 0 0 0 0 0 0 0 0
40941- 0 0 0 0 0 0 6 6 6 10 10 10
40942- 14 14 14 22 22 22 30 30 30 38 38 38
40943- 50 50 50 62 62 62 74 74 74 90 90 90
40944-101 98 89 112 100 78 121 87 25 124 80 6
40945-137 92 6 152 99 6 152 99 6 152 99 6
40946-138 86 6 124 80 6 98 70 6 86 66 30
40947-101 98 89 82 82 82 58 58 58 46 46 46
40948- 38 38 38 34 34 34 34 34 34 34 34 34
40949- 34 34 34 34 34 34 34 34 34 34 34 34
40950- 34 34 34 34 34 34 38 38 38 42 42 42
40951- 54 54 54 82 82 82 94 86 76 91 60 6
40952-134 86 6 156 107 11 167 114 7 175 118 6
40953-175 118 6 167 114 7 152 99 6 121 87 25
40954-101 98 89 62 62 62 34 34 34 18 18 18
40955- 6 6 6 0 0 0 0 0 0 0 0 0
40956- 0 0 0 0 0 0 0 0 0 0 0 0
40957- 0 0 0 0 0 0 0 0 0 0 0 0
40958- 0 0 0 0 0 0 0 0 0 0 0 0
40959- 0 0 0 0 0 0 0 0 0 0 0 0
40960- 0 0 0 0 0 0 0 0 0 0 0 0
40961- 0 0 0 0 0 0 0 0 0 0 0 0
40962- 0 0 0 6 6 6 6 6 6 10 10 10
40963- 18 18 18 22 22 22 30 30 30 42 42 42
40964- 50 50 50 66 66 66 86 86 86 101 98 89
40965-106 86 58 98 70 6 104 69 6 104 69 6
40966-104 69 6 91 60 6 82 62 34 90 90 90
40967- 62 62 62 38 38 38 22 22 22 14 14 14
40968- 10 10 10 10 10 10 10 10 10 10 10 10
40969- 10 10 10 10 10 10 6 6 6 10 10 10
40970- 10 10 10 10 10 10 10 10 10 14 14 14
40971- 22 22 22 42 42 42 70 70 70 89 81 66
40972- 80 54 7 104 69 6 124 80 6 137 92 6
40973-134 86 6 116 81 8 100 82 52 86 86 86
40974- 58 58 58 30 30 30 14 14 14 6 6 6
40975- 0 0 0 0 0 0 0 0 0 0 0 0
40976- 0 0 0 0 0 0 0 0 0 0 0 0
40977- 0 0 0 0 0 0 0 0 0 0 0 0
40978- 0 0 0 0 0 0 0 0 0 0 0 0
40979- 0 0 0 0 0 0 0 0 0 0 0 0
40980- 0 0 0 0 0 0 0 0 0 0 0 0
40981- 0 0 0 0 0 0 0 0 0 0 0 0
40982- 0 0 0 0 0 0 0 0 0 0 0 0
40983- 0 0 0 6 6 6 10 10 10 14 14 14
40984- 18 18 18 26 26 26 38 38 38 54 54 54
40985- 70 70 70 86 86 86 94 86 76 89 81 66
40986- 89 81 66 86 86 86 74 74 74 50 50 50
40987- 30 30 30 14 14 14 6 6 6 0 0 0
40988- 0 0 0 0 0 0 0 0 0 0 0 0
40989- 0 0 0 0 0 0 0 0 0 0 0 0
40990- 0 0 0 0 0 0 0 0 0 0 0 0
40991- 6 6 6 18 18 18 34 34 34 58 58 58
40992- 82 82 82 89 81 66 89 81 66 89 81 66
40993- 94 86 66 94 86 76 74 74 74 50 50 50
40994- 26 26 26 14 14 14 6 6 6 0 0 0
40995- 0 0 0 0 0 0 0 0 0 0 0 0
40996- 0 0 0 0 0 0 0 0 0 0 0 0
40997- 0 0 0 0 0 0 0 0 0 0 0 0
40998- 0 0 0 0 0 0 0 0 0 0 0 0
40999- 0 0 0 0 0 0 0 0 0 0 0 0
41000- 0 0 0 0 0 0 0 0 0 0 0 0
41001- 0 0 0 0 0 0 0 0 0 0 0 0
41002- 0 0 0 0 0 0 0 0 0 0 0 0
41003- 0 0 0 0 0 0 0 0 0 0 0 0
41004- 6 6 6 6 6 6 14 14 14 18 18 18
41005- 30 30 30 38 38 38 46 46 46 54 54 54
41006- 50 50 50 42 42 42 30 30 30 18 18 18
41007- 10 10 10 0 0 0 0 0 0 0 0 0
41008- 0 0 0 0 0 0 0 0 0 0 0 0
41009- 0 0 0 0 0 0 0 0 0 0 0 0
41010- 0 0 0 0 0 0 0 0 0 0 0 0
41011- 0 0 0 6 6 6 14 14 14 26 26 26
41012- 38 38 38 50 50 50 58 58 58 58 58 58
41013- 54 54 54 42 42 42 30 30 30 18 18 18
41014- 10 10 10 0 0 0 0 0 0 0 0 0
41015- 0 0 0 0 0 0 0 0 0 0 0 0
41016- 0 0 0 0 0 0 0 0 0 0 0 0
41017- 0 0 0 0 0 0 0 0 0 0 0 0
41018- 0 0 0 0 0 0 0 0 0 0 0 0
41019- 0 0 0 0 0 0 0 0 0 0 0 0
41020- 0 0 0 0 0 0 0 0 0 0 0 0
41021- 0 0 0 0 0 0 0 0 0 0 0 0
41022- 0 0 0 0 0 0 0 0 0 0 0 0
41023- 0 0 0 0 0 0 0 0 0 0 0 0
41024- 0 0 0 0 0 0 0 0 0 6 6 6
41025- 6 6 6 10 10 10 14 14 14 18 18 18
41026- 18 18 18 14 14 14 10 10 10 6 6 6
41027- 0 0 0 0 0 0 0 0 0 0 0 0
41028- 0 0 0 0 0 0 0 0 0 0 0 0
41029- 0 0 0 0 0 0 0 0 0 0 0 0
41030- 0 0 0 0 0 0 0 0 0 0 0 0
41031- 0 0 0 0 0 0 0 0 0 6 6 6
41032- 14 14 14 18 18 18 22 22 22 22 22 22
41033- 18 18 18 14 14 14 10 10 10 6 6 6
41034- 0 0 0 0 0 0 0 0 0 0 0 0
41035- 0 0 0 0 0 0 0 0 0 0 0 0
41036- 0 0 0 0 0 0 0 0 0 0 0 0
41037- 0 0 0 0 0 0 0 0 0 0 0 0
41038- 0 0 0 0 0 0 0 0 0 0 0 0
41039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41052+4 4 4 4 4 4
41053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41066+4 4 4 4 4 4
41067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41080+4 4 4 4 4 4
41081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41094+4 4 4 4 4 4
41095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41108+4 4 4 4 4 4
41109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41122+4 4 4 4 4 4
41123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41127+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
41128+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
41129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41132+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41133+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41134+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41136+4 4 4 4 4 4
41137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41141+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41142+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41143+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41146+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41147+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41148+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41149+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41150+4 4 4 4 4 4
41151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41155+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41156+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41157+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41160+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41161+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41162+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41163+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41164+4 4 4 4 4 4
41165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41169+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41170+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41171+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41173+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41174+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41175+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41176+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41177+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41178+4 4 4 4 4 4
41179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41183+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41184+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41185+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41186+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41187+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41188+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41189+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41190+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41191+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41192+4 4 4 4 4 4
41193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41196+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41197+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41198+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41199+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41200+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41201+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41202+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41203+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41204+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41205+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41206+4 4 4 4 4 4
41207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41210+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41211+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41212+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41213+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41214+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41215+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41216+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41217+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41218+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41219+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41220+4 4 4 4 4 4
41221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41224+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41225+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41226+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41227+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41228+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41229+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41230+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41231+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41232+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41233+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41234+4 4 4 4 4 4
41235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41238+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41239+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41240+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41241+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41242+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41243+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41244+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41245+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41246+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41247+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41248+4 4 4 4 4 4
41249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41252+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41253+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41254+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41255+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41256+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41257+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41258+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41259+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41260+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41261+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41262+4 4 4 4 4 4
41263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41265+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41266+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41267+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41268+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41269+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41270+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41271+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41272+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41273+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41274+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41275+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41276+4 4 4 4 4 4
41277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41279+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41280+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41281+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41282+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41283+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41284+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41285+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41286+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41287+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41288+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41289+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41290+0 0 0 4 4 4
41291+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41292+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41293+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41294+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41295+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41296+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41297+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41298+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41299+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41300+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41301+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41302+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41303+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41304+2 0 0 0 0 0
41305+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41306+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41307+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41308+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41309+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41310+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41311+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41312+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41313+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41314+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41315+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41316+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41317+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41318+37 38 37 0 0 0
41319+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41320+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41321+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41322+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41323+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41324+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41325+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41326+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41327+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41328+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41329+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41330+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41331+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41332+85 115 134 4 0 0
41333+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41334+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41335+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41336+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41337+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41338+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41339+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41340+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41341+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41342+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41343+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41344+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41345+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41346+60 73 81 4 0 0
41347+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41348+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41349+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41350+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41351+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41352+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41353+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41354+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41355+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41356+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41357+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41358+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41359+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41360+16 19 21 4 0 0
41361+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41362+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41363+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41364+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41365+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41366+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41367+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41368+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41369+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41370+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41371+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41372+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41373+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41374+4 0 0 4 3 3
41375+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41376+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41377+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41379+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41380+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41381+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41382+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41383+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41384+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41385+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41386+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41387+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41388+3 2 2 4 4 4
41389+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41390+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41391+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41392+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41393+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41394+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41395+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41396+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41397+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41398+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41399+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41400+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41401+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41402+4 4 4 4 4 4
41403+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41404+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41405+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41406+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41407+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41408+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41409+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41410+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41411+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41412+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41413+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41414+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41415+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41416+4 4 4 4 4 4
41417+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41418+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41419+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41420+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41421+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41422+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41423+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41424+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41425+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41426+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41427+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41428+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41429+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41430+5 5 5 5 5 5
41431+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41432+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41433+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41434+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41435+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41436+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41437+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41438+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41439+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41440+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41441+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41442+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41443+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41444+5 5 5 4 4 4
41445+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41446+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41447+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41448+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41449+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41450+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41451+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41452+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41453+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41454+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41455+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41456+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41458+4 4 4 4 4 4
41459+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41460+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41461+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41462+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41463+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41464+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41465+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41466+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41467+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41468+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41469+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41470+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41472+4 4 4 4 4 4
41473+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41474+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41475+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41476+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41477+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41478+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41479+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41480+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41481+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41482+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41483+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41486+4 4 4 4 4 4
41487+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41488+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
[... several hundred lines of PPM image data (RGB pixel triplets added by the patch, continuing the boot-logo image hunk begun above) omitted ...]
42159diff -urNp linux-2.6.32.48/drivers/video/nvidia/nv_backlight.c linux-2.6.32.48/drivers/video/nvidia/nv_backlight.c
42160--- linux-2.6.32.48/drivers/video/nvidia/nv_backlight.c 2011-11-08 19:02:43.000000000 -0500
42161+++ linux-2.6.32.48/drivers/video/nvidia/nv_backlight.c 2011-11-15 19:59:43.000000000 -0500
42162@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
42163 return bd->props.brightness;
42164 }
42165
42166-static struct backlight_ops nvidia_bl_ops = {
42167+static const struct backlight_ops nvidia_bl_ops = {
42168 .get_brightness = nvidia_bl_get_brightness,
42169 .update_status = nvidia_bl_update_status,
42170 };
42171diff -urNp linux-2.6.32.48/drivers/video/riva/fbdev.c linux-2.6.32.48/drivers/video/riva/fbdev.c
42172--- linux-2.6.32.48/drivers/video/riva/fbdev.c 2011-11-08 19:02:43.000000000 -0500
42173+++ linux-2.6.32.48/drivers/video/riva/fbdev.c 2011-11-15 19:59:43.000000000 -0500
42174@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
42175 return bd->props.brightness;
42176 }
42177
42178-static struct backlight_ops riva_bl_ops = {
42179+static const struct backlight_ops riva_bl_ops = {
42180 .get_brightness = riva_bl_get_brightness,
42181 .update_status = riva_bl_update_status,
42182 };
42183diff -urNp linux-2.6.32.48/drivers/video/uvesafb.c linux-2.6.32.48/drivers/video/uvesafb.c
42184--- linux-2.6.32.48/drivers/video/uvesafb.c 2011-11-08 19:02:43.000000000 -0500
42185+++ linux-2.6.32.48/drivers/video/uvesafb.c 2011-11-15 19:59:43.000000000 -0500
42186@@ -18,6 +18,7 @@
42187 #include <linux/fb.h>
42188 #include <linux/io.h>
42189 #include <linux/mutex.h>
42190+#include <linux/moduleloader.h>
42191 #include <video/edid.h>
42192 #include <video/uvesafb.h>
42193 #ifdef CONFIG_X86
42194@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
42195 NULL,
42196 };
42197
42198- return call_usermodehelper(v86d_path, argv, envp, 1);
42199+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
42200 }
42201
42202 /*
42203@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
42204 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42205 par->pmi_setpal = par->ypan = 0;
42206 } else {
42207+
42208+#ifdef CONFIG_PAX_KERNEXEC
42209+#ifdef CONFIG_MODULES
42210+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42211+#endif
42212+ if (!par->pmi_code) {
42213+ par->pmi_setpal = par->ypan = 0;
42214+ return 0;
42215+ }
42216+#endif
42217+
42218 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42219 + task->t.regs.edi);
42220+
42221+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42222+ pax_open_kernel();
42223+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42224+ pax_close_kernel();
42225+
42226+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42227+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42228+#else
42229 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42230 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42231+#endif
42232+
42233 printk(KERN_INFO "uvesafb: protected mode interface info at "
42234 "%04x:%04x\n",
42235 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42236@@ -1799,6 +1822,11 @@ out:
42237 if (par->vbe_modes)
42238 kfree(par->vbe_modes);
42239
42240+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42241+ if (par->pmi_code)
42242+ module_free_exec(NULL, par->pmi_code);
42243+#endif
42244+
42245 framebuffer_release(info);
42246 return err;
42247 }
42248@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
42249 kfree(par->vbe_state_orig);
42250 if (par->vbe_state_saved)
42251 kfree(par->vbe_state_saved);
42252+
42253+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42254+ if (par->pmi_code)
42255+ module_free_exec(NULL, par->pmi_code);
42256+#endif
42257+
42258 }
42259
42260 framebuffer_release(info);
42261diff -urNp linux-2.6.32.48/drivers/video/vesafb.c linux-2.6.32.48/drivers/video/vesafb.c
42262--- linux-2.6.32.48/drivers/video/vesafb.c 2011-11-08 19:02:43.000000000 -0500
42263+++ linux-2.6.32.48/drivers/video/vesafb.c 2011-11-15 19:59:43.000000000 -0500
42264@@ -9,6 +9,7 @@
42265 */
42266
42267 #include <linux/module.h>
42268+#include <linux/moduleloader.h>
42269 #include <linux/kernel.h>
42270 #include <linux/errno.h>
42271 #include <linux/string.h>
42272@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
42273 static int vram_total __initdata; /* Set total amount of memory */
42274 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42275 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42276-static void (*pmi_start)(void) __read_mostly;
42277-static void (*pmi_pal) (void) __read_mostly;
42278+static void (*pmi_start)(void) __read_only;
42279+static void (*pmi_pal) (void) __read_only;
42280 static int depth __read_mostly;
42281 static int vga_compat __read_mostly;
42282 /* --------------------------------------------------------------------- */
42283@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
42284 unsigned int size_vmode;
42285 unsigned int size_remap;
42286 unsigned int size_total;
42287+ void *pmi_code = NULL;
42288
42289 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42290 return -ENODEV;
42291@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
42292 size_remap = size_total;
42293 vesafb_fix.smem_len = size_remap;
42294
42295-#ifndef __i386__
42296- screen_info.vesapm_seg = 0;
42297-#endif
42298-
42299 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42300 printk(KERN_WARNING
42301 "vesafb: cannot reserve video memory at 0x%lx\n",
42302@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
42303 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42304 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42305
42306+#ifdef __i386__
42307+
42308+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42309+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
42310+ if (!pmi_code)
42311+#elif !defined(CONFIG_PAX_KERNEXEC)
42312+ if (0)
42313+#endif
42314+
42315+#endif
42316+ screen_info.vesapm_seg = 0;
42317+
42318 if (screen_info.vesapm_seg) {
42319- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42320- screen_info.vesapm_seg,screen_info.vesapm_off);
42321+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42322+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42323 }
42324
42325 if (screen_info.vesapm_seg < 0xc000)
42326@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
42327
42328 if (ypan || pmi_setpal) {
42329 unsigned short *pmi_base;
42330+
42331 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42332- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42333- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42334+
42335+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42336+ pax_open_kernel();
42337+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42338+#else
42339+ pmi_code = pmi_base;
42340+#endif
42341+
42342+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42343+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42344+
42345+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42346+ pmi_start = ktva_ktla(pmi_start);
42347+ pmi_pal = ktva_ktla(pmi_pal);
42348+ pax_close_kernel();
42349+#endif
42350+
42351 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42352 if (pmi_base[3]) {
42353 printk(KERN_INFO "vesafb: pmi: ports = ");
42354@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
42355 info->node, info->fix.id);
42356 return 0;
42357 err:
42358+
42359+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42360+ module_free_exec(NULL, pmi_code);
42361+#endif
42362+
42363 if (info->screen_base)
42364 iounmap(info->screen_base);
42365 framebuffer_release(info);
42366diff -urNp linux-2.6.32.48/drivers/xen/sys-hypervisor.c linux-2.6.32.48/drivers/xen/sys-hypervisor.c
42367--- linux-2.6.32.48/drivers/xen/sys-hypervisor.c 2011-11-08 19:02:43.000000000 -0500
42368+++ linux-2.6.32.48/drivers/xen/sys-hypervisor.c 2011-11-15 19:59:43.000000000 -0500
42369@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
42370 return 0;
42371 }
42372
42373-static struct sysfs_ops hyp_sysfs_ops = {
42374+static const struct sysfs_ops hyp_sysfs_ops = {
42375 .show = hyp_sysfs_show,
42376 .store = hyp_sysfs_store,
42377 };
42378diff -urNp linux-2.6.32.48/fs/9p/vfs_inode.c linux-2.6.32.48/fs/9p/vfs_inode.c
42379--- linux-2.6.32.48/fs/9p/vfs_inode.c 2011-11-08 19:02:43.000000000 -0500
42380+++ linux-2.6.32.48/fs/9p/vfs_inode.c 2011-11-15 19:59:43.000000000 -0500
42381@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
42382 static void
42383 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42384 {
42385- char *s = nd_get_link(nd);
42386+ const char *s = nd_get_link(nd);
42387
42388 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
42389 IS_ERR(s) ? "<error>" : s);
42390diff -urNp linux-2.6.32.48/fs/aio.c linux-2.6.32.48/fs/aio.c
42391--- linux-2.6.32.48/fs/aio.c 2011-11-08 19:02:43.000000000 -0500
42392+++ linux-2.6.32.48/fs/aio.c 2011-11-15 19:59:43.000000000 -0500
42393@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
42394 size += sizeof(struct io_event) * nr_events;
42395 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42396
42397- if (nr_pages < 0)
42398+ if (nr_pages <= 0)
42399 return -EINVAL;
42400
42401 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42402@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
42403 struct aio_timeout to;
42404 int retry = 0;
42405
42406+ pax_track_stack();
42407+
42408 /* needed to zero any padding within an entry (there shouldn't be
42409 * any, but C is fun!
42410 */
42411@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
42412 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
42413 {
42414 ssize_t ret;
42415+ struct iovec iovstack;
42416
42417 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
42418 kiocb->ki_nbytes, 1,
42419- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
42420+ &iovstack, &kiocb->ki_iovec);
42421 if (ret < 0)
42422 goto out;
42423
42424+ if (kiocb->ki_iovec == &iovstack) {
42425+ kiocb->ki_inline_vec = iovstack;
42426+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
42427+ }
42428 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42429 kiocb->ki_cur_seg = 0;
42430 /* ki_nbytes/left now reflect bytes instead of segs */
42431diff -urNp linux-2.6.32.48/fs/attr.c linux-2.6.32.48/fs/attr.c
42432--- linux-2.6.32.48/fs/attr.c 2011-11-08 19:02:43.000000000 -0500
42433+++ linux-2.6.32.48/fs/attr.c 2011-11-15 19:59:43.000000000 -0500
42434@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
42435 unsigned long limit;
42436
42437 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
42438+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42439 if (limit != RLIM_INFINITY && offset > limit)
42440 goto out_sig;
42441 if (offset > inode->i_sb->s_maxbytes)
42442diff -urNp linux-2.6.32.48/fs/autofs/root.c linux-2.6.32.48/fs/autofs/root.c
42443--- linux-2.6.32.48/fs/autofs/root.c 2011-11-08 19:02:43.000000000 -0500
42444+++ linux-2.6.32.48/fs/autofs/root.c 2011-11-15 19:59:43.000000000 -0500
42445@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
42446 set_bit(n,sbi->symlink_bitmap);
42447 sl = &sbi->symlink[n];
42448 sl->len = strlen(symname);
42449- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
42450+ slsize = sl->len+1;
42451+ sl->data = kmalloc(slsize, GFP_KERNEL);
42452 if (!sl->data) {
42453 clear_bit(n,sbi->symlink_bitmap);
42454 unlock_kernel();
42455diff -urNp linux-2.6.32.48/fs/autofs4/symlink.c linux-2.6.32.48/fs/autofs4/symlink.c
42456--- linux-2.6.32.48/fs/autofs4/symlink.c 2011-11-08 19:02:43.000000000 -0500
42457+++ linux-2.6.32.48/fs/autofs4/symlink.c 2011-11-15 19:59:43.000000000 -0500
42458@@ -15,7 +15,7 @@
42459 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
42460 {
42461 struct autofs_info *ino = autofs4_dentry_ino(dentry);
42462- nd_set_link(nd, (char *)ino->u.symlink);
42463+ nd_set_link(nd, ino->u.symlink);
42464 return NULL;
42465 }
42466
42467diff -urNp linux-2.6.32.48/fs/autofs4/waitq.c linux-2.6.32.48/fs/autofs4/waitq.c
42468--- linux-2.6.32.48/fs/autofs4/waitq.c 2011-11-08 19:02:43.000000000 -0500
42469+++ linux-2.6.32.48/fs/autofs4/waitq.c 2011-11-15 19:59:43.000000000 -0500
42470@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
42471 {
42472 unsigned long sigpipe, flags;
42473 mm_segment_t fs;
42474- const char *data = (const char *)addr;
42475+ const char __user *data = (const char __force_user *)addr;
42476 ssize_t wr = 0;
42477
42478 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
42479diff -urNp linux-2.6.32.48/fs/befs/linuxvfs.c linux-2.6.32.48/fs/befs/linuxvfs.c
42480--- linux-2.6.32.48/fs/befs/linuxvfs.c 2011-11-08 19:02:43.000000000 -0500
42481+++ linux-2.6.32.48/fs/befs/linuxvfs.c 2011-11-15 19:59:43.000000000 -0500
42482@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
42483 {
42484 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42485 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42486- char *link = nd_get_link(nd);
42487+ const char *link = nd_get_link(nd);
42488 if (!IS_ERR(link))
42489 kfree(link);
42490 }
42491diff -urNp linux-2.6.32.48/fs/binfmt_aout.c linux-2.6.32.48/fs/binfmt_aout.c
42492--- linux-2.6.32.48/fs/binfmt_aout.c 2011-11-08 19:02:43.000000000 -0500
42493+++ linux-2.6.32.48/fs/binfmt_aout.c 2011-11-15 19:59:43.000000000 -0500
42494@@ -16,6 +16,7 @@
42495 #include <linux/string.h>
42496 #include <linux/fs.h>
42497 #include <linux/file.h>
42498+#include <linux/security.h>
42499 #include <linux/stat.h>
42500 #include <linux/fcntl.h>
42501 #include <linux/ptrace.h>
42502@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
42503 #endif
42504 # define START_STACK(u) (u.start_stack)
42505
42506+ memset(&dump, 0, sizeof(dump));
42507+
42508 fs = get_fs();
42509 set_fs(KERNEL_DS);
42510 has_dumped = 1;
42511@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
42512
42513 /* If the size of the dump file exceeds the rlimit, then see what would happen
42514 if we wrote the stack, but not the data area. */
42515+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42516 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
42517 dump.u_dsize = 0;
42518
42519 /* Make sure we have enough room to write the stack and data areas. */
42520+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42521 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
42522 dump.u_ssize = 0;
42523
42524@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
42525 dump_size = dump.u_ssize << PAGE_SHIFT;
42526 DUMP_WRITE(dump_start,dump_size);
42527 }
42528-/* Finally dump the task struct. Not be used by gdb, but could be useful */
42529- set_fs(KERNEL_DS);
42530- DUMP_WRITE(current,sizeof(*current));
42531+/* Finally, let's not dump the task struct. It is not used by gdb, but could be useful to an attacker */
42532 end_coredump:
42533 set_fs(fs);
42534 return has_dumped;
42535@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
42536 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
42537 if (rlim >= RLIM_INFINITY)
42538 rlim = ~0;
42539+
42540+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42541 if (ex.a_data + ex.a_bss > rlim)
42542 return -ENOMEM;
42543
42544@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
42545 install_exec_creds(bprm);
42546 current->flags &= ~PF_FORKNOEXEC;
42547
42548+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42549+ current->mm->pax_flags = 0UL;
42550+#endif
42551+
42552+#ifdef CONFIG_PAX_PAGEEXEC
42553+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42554+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42555+
42556+#ifdef CONFIG_PAX_EMUTRAMP
42557+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42558+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42559+#endif
42560+
42561+#ifdef CONFIG_PAX_MPROTECT
42562+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42563+ current->mm->pax_flags |= MF_PAX_MPROTECT;
42564+#endif
42565+
42566+ }
42567+#endif
42568+
42569 if (N_MAGIC(ex) == OMAGIC) {
42570 unsigned long text_addr, map_size;
42571 loff_t pos;
42572@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
42573
42574 down_write(&current->mm->mmap_sem);
42575 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42576- PROT_READ | PROT_WRITE | PROT_EXEC,
42577+ PROT_READ | PROT_WRITE,
42578 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42579 fd_offset + ex.a_text);
42580 up_write(&current->mm->mmap_sem);
42581diff -urNp linux-2.6.32.48/fs/binfmt_elf.c linux-2.6.32.48/fs/binfmt_elf.c
42582--- linux-2.6.32.48/fs/binfmt_elf.c 2011-11-08 19:02:43.000000000 -0500
42583+++ linux-2.6.32.48/fs/binfmt_elf.c 2011-11-15 19:59:43.000000000 -0500
42584@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
42585 #define elf_core_dump NULL
42586 #endif
42587
42588+#ifdef CONFIG_PAX_MPROTECT
42589+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42590+#endif
42591+
42592 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42593 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42594 #else
42595@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
42596 .load_binary = load_elf_binary,
42597 .load_shlib = load_elf_library,
42598 .core_dump = elf_core_dump,
42599+
42600+#ifdef CONFIG_PAX_MPROTECT
42601+ .handle_mprotect= elf_handle_mprotect,
42602+#endif
42603+
42604 .min_coredump = ELF_EXEC_PAGESIZE,
42605 .hasvdso = 1
42606 };
42607@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
42608
42609 static int set_brk(unsigned long start, unsigned long end)
42610 {
42611+ unsigned long e = end;
42612+
42613 start = ELF_PAGEALIGN(start);
42614 end = ELF_PAGEALIGN(end);
42615 if (end > start) {
42616@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
42617 if (BAD_ADDR(addr))
42618 return addr;
42619 }
42620- current->mm->start_brk = current->mm->brk = end;
42621+ current->mm->start_brk = current->mm->brk = e;
42622 return 0;
42623 }
42624
42625@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
42626 elf_addr_t __user *u_rand_bytes;
42627 const char *k_platform = ELF_PLATFORM;
42628 const char *k_base_platform = ELF_BASE_PLATFORM;
42629- unsigned char k_rand_bytes[16];
42630+ u32 k_rand_bytes[4];
42631 int items;
42632 elf_addr_t *elf_info;
42633 int ei_index = 0;
42634 const struct cred *cred = current_cred();
42635 struct vm_area_struct *vma;
42636+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42637+
42638+ pax_track_stack();
42639
42640 /*
42641 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42642@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
42643 * Generate 16 random bytes for userspace PRNG seeding.
42644 */
42645 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42646- u_rand_bytes = (elf_addr_t __user *)
42647- STACK_ALLOC(p, sizeof(k_rand_bytes));
42648+ srandom32(k_rand_bytes[0] ^ random32());
42649+ srandom32(k_rand_bytes[1] ^ random32());
42650+ srandom32(k_rand_bytes[2] ^ random32());
42651+ srandom32(k_rand_bytes[3] ^ random32());
42652+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42653+ u_rand_bytes = (elf_addr_t __user *) p;
42654 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42655 return -EFAULT;
42656
42657@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
42658 return -EFAULT;
42659 current->mm->env_end = p;
42660
42661+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42662+
42663 /* Put the elf_info on the stack in the right place. */
42664 sp = (elf_addr_t __user *)envp + 1;
42665- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42666+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42667 return -EFAULT;
42668 return 0;
42669 }
42670@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
42671 {
42672 struct elf_phdr *elf_phdata;
42673 struct elf_phdr *eppnt;
42674- unsigned long load_addr = 0;
42675+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42676 int load_addr_set = 0;
42677 unsigned long last_bss = 0, elf_bss = 0;
42678- unsigned long error = ~0UL;
42679+ unsigned long error = -EINVAL;
42680 unsigned long total_size;
42681 int retval, i, size;
42682
42683@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
42684 goto out_close;
42685 }
42686
42687+#ifdef CONFIG_PAX_SEGMEXEC
42688+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42689+ pax_task_size = SEGMEXEC_TASK_SIZE;
42690+#endif
42691+
42692 eppnt = elf_phdata;
42693 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42694 if (eppnt->p_type == PT_LOAD) {
42695@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
42696 k = load_addr + eppnt->p_vaddr;
42697 if (BAD_ADDR(k) ||
42698 eppnt->p_filesz > eppnt->p_memsz ||
42699- eppnt->p_memsz > TASK_SIZE ||
42700- TASK_SIZE - eppnt->p_memsz < k) {
42701+ eppnt->p_memsz > pax_task_size ||
42702+ pax_task_size - eppnt->p_memsz < k) {
42703 error = -ENOMEM;
42704 goto out_close;
42705 }
42706@@ -532,6 +557,194 @@ out:
42707 return error;
42708 }
42709
42710+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
42711+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
42712+{
42713+ unsigned long pax_flags = 0UL;
42714+
42715+#ifdef CONFIG_PAX_PAGEEXEC
42716+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42717+ pax_flags |= MF_PAX_PAGEEXEC;
42718+#endif
42719+
42720+#ifdef CONFIG_PAX_SEGMEXEC
42721+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42722+ pax_flags |= MF_PAX_SEGMEXEC;
42723+#endif
42724+
42725+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42726+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42727+ if (nx_enabled)
42728+ pax_flags &= ~MF_PAX_SEGMEXEC;
42729+ else
42730+ pax_flags &= ~MF_PAX_PAGEEXEC;
42731+ }
42732+#endif
42733+
42734+#ifdef CONFIG_PAX_EMUTRAMP
42735+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42736+ pax_flags |= MF_PAX_EMUTRAMP;
42737+#endif
42738+
42739+#ifdef CONFIG_PAX_MPROTECT
42740+ if (elf_phdata->p_flags & PF_MPROTECT)
42741+ pax_flags |= MF_PAX_MPROTECT;
42742+#endif
42743+
42744+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42745+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42746+ pax_flags |= MF_PAX_RANDMMAP;
42747+#endif
42748+
42749+ return pax_flags;
42750+}
42751+#endif
42752+
42753+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42754+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
42755+{
42756+ unsigned long pax_flags = 0UL;
42757+
42758+#ifdef CONFIG_PAX_PAGEEXEC
42759+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42760+ pax_flags |= MF_PAX_PAGEEXEC;
42761+#endif
42762+
42763+#ifdef CONFIG_PAX_SEGMEXEC
42764+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42765+ pax_flags |= MF_PAX_SEGMEXEC;
42766+#endif
42767+
42768+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42769+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42770+ if (nx_enabled)
42771+ pax_flags &= ~MF_PAX_SEGMEXEC;
42772+ else
42773+ pax_flags &= ~MF_PAX_PAGEEXEC;
42774+ }
42775+#endif
42776+
42777+#ifdef CONFIG_PAX_EMUTRAMP
42778+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42779+ pax_flags |= MF_PAX_EMUTRAMP;
42780+#endif
42781+
42782+#ifdef CONFIG_PAX_MPROTECT
42783+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42784+ pax_flags |= MF_PAX_MPROTECT;
42785+#endif
42786+
42787+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42788+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42789+ pax_flags |= MF_PAX_RANDMMAP;
42790+#endif
42791+
42792+ return pax_flags;
42793+}
42794+#endif
42795+
42796+#ifdef CONFIG_PAX_EI_PAX
42797+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42798+{
42799+ unsigned long pax_flags = 0UL;
42800+
42801+#ifdef CONFIG_PAX_PAGEEXEC
42802+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42803+ pax_flags |= MF_PAX_PAGEEXEC;
42804+#endif
42805+
42806+#ifdef CONFIG_PAX_SEGMEXEC
42807+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42808+ pax_flags |= MF_PAX_SEGMEXEC;
42809+#endif
42810+
42811+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42812+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42813+ if (nx_enabled)
42814+ pax_flags &= ~MF_PAX_SEGMEXEC;
42815+ else
42816+ pax_flags &= ~MF_PAX_PAGEEXEC;
42817+ }
42818+#endif
42819+
42820+#ifdef CONFIG_PAX_EMUTRAMP
42821+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42822+ pax_flags |= MF_PAX_EMUTRAMP;
42823+#endif
42824+
42825+#ifdef CONFIG_PAX_MPROTECT
42826+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42827+ pax_flags |= MF_PAX_MPROTECT;
42828+#endif
42829+
42830+#ifdef CONFIG_PAX_ASLR
42831+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42832+ pax_flags |= MF_PAX_RANDMMAP;
42833+#endif
42834+
42835+ return pax_flags;
42836+}
42837+#endif
42838+
42839+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42840+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42841+{
42842+ unsigned long pax_flags = 0UL;
42843+
42844+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42845+ unsigned long i;
42846+ int found_flags = 0;
42847+#endif
42848+
42849+#ifdef CONFIG_PAX_EI_PAX
42850+ pax_flags = pax_parse_ei_pax(elf_ex);
42851+#endif
42852+
42853+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42854+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42855+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42856+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42857+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42858+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42859+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42860+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42861+ return -EINVAL;
42862+
42863+#ifdef CONFIG_PAX_SOFTMODE
42864+ if (pax_softmode)
42865+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
42866+ else
42867+#endif
42868+
42869+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
42870+ found_flags = 1;
42871+ break;
42872+ }
42873+#endif
42874+
42875+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
42876+ if (found_flags == 0) {
42877+ struct elf_phdr phdr;
42878+ memset(&phdr, 0, sizeof(phdr));
42879+ phdr.p_flags = PF_NOEMUTRAMP;
42880+#ifdef CONFIG_PAX_SOFTMODE
42881+ if (pax_softmode)
42882+ pax_flags = pax_parse_softmode(&phdr);
42883+ else
42884+#endif
42885+ pax_flags = pax_parse_hardmode(&phdr);
42886+ }
42887+#endif
42888+
42889+
42890+ if (0 > pax_check_flags(&pax_flags))
42891+ return -EINVAL;
42892+
42893+ current->mm->pax_flags = pax_flags;
42894+ return 0;
42895+}
42896+#endif
42897+
42898 /*
42899 * These are the functions used to load ELF style executables and shared
42900 * libraries. There is no binary dependent code anywhere else.
42901@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
42902 {
42903 unsigned int random_variable = 0;
42904
42905+#ifdef CONFIG_PAX_RANDUSTACK
42906+ if (randomize_va_space)
42907+ return stack_top - current->mm->delta_stack;
42908+#endif
42909+
42910 if ((current->flags & PF_RANDOMIZE) &&
42911 !(current->personality & ADDR_NO_RANDOMIZE)) {
42912 random_variable = get_random_int() & STACK_RND_MASK;
42913@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
42914 unsigned long load_addr = 0, load_bias = 0;
42915 int load_addr_set = 0;
42916 char * elf_interpreter = NULL;
42917- unsigned long error;
42918+ unsigned long error = 0;
42919 struct elf_phdr *elf_ppnt, *elf_phdata;
42920 unsigned long elf_bss, elf_brk;
42921 int retval, i;
42922@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
42923 unsigned long start_code, end_code, start_data, end_data;
42924 unsigned long reloc_func_desc = 0;
42925 int executable_stack = EXSTACK_DEFAULT;
42926- unsigned long def_flags = 0;
42927 struct {
42928 struct elfhdr elf_ex;
42929 struct elfhdr interp_elf_ex;
42930 } *loc;
42931+ unsigned long pax_task_size = TASK_SIZE;
42932
42933 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42934 if (!loc) {
42935@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
42936
42937 /* OK, This is the point of no return */
42938 current->flags &= ~PF_FORKNOEXEC;
42939- current->mm->def_flags = def_flags;
42940+
42941+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42942+ current->mm->pax_flags = 0UL;
42943+#endif
42944+
42945+#ifdef CONFIG_PAX_DLRESOLVE
42946+ current->mm->call_dl_resolve = 0UL;
42947+#endif
42948+
42949+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42950+ current->mm->call_syscall = 0UL;
42951+#endif
42952+
42953+#ifdef CONFIG_PAX_ASLR
42954+ current->mm->delta_mmap = 0UL;
42955+ current->mm->delta_stack = 0UL;
42956+#endif
42957+
42958+ current->mm->def_flags = 0;
42959+
42960+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42961+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
42962+ send_sig(SIGKILL, current, 0);
42963+ goto out_free_dentry;
42964+ }
42965+#endif
42966+
42967+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42968+ pax_set_initial_flags(bprm);
42969+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42970+ if (pax_set_initial_flags_func)
42971+ (pax_set_initial_flags_func)(bprm);
42972+#endif
42973+
42974+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42975+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
42976+ current->mm->context.user_cs_limit = PAGE_SIZE;
42977+ current->mm->def_flags |= VM_PAGEEXEC;
42978+ }
42979+#endif
42980+
42981+#ifdef CONFIG_PAX_SEGMEXEC
42982+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42983+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42984+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42985+ pax_task_size = SEGMEXEC_TASK_SIZE;
42986+ }
42987+#endif
42988+
42989+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42990+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42991+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42992+ put_cpu();
42993+ }
42994+#endif
42995
42996 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42997 may depend on the personality. */
42998 SET_PERSONALITY(loc->elf_ex);
42999+
43000+#ifdef CONFIG_PAX_ASLR
43001+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43002+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43003+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43004+ }
43005+#endif
43006+
43007+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43008+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43009+ executable_stack = EXSTACK_DISABLE_X;
43010+ current->personality &= ~READ_IMPLIES_EXEC;
43011+ } else
43012+#endif
43013+
43014 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43015 current->personality |= READ_IMPLIES_EXEC;
43016
43017@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
43018 #else
43019 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43020 #endif
43021+
43022+#ifdef CONFIG_PAX_RANDMMAP
43023+ /* PaX: randomize base address at the default exe base if requested */
43024+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43025+#ifdef CONFIG_SPARC64
43026+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43027+#else
43028+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43029+#endif
43030+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43031+ elf_flags |= MAP_FIXED;
43032+ }
43033+#endif
43034+
43035 }
43036
43037 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43038@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
43039 * allowed task size. Note that p_filesz must always be
43040 * <= p_memsz so it is only necessary to check p_memsz.
43041 */
43042- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43043- elf_ppnt->p_memsz > TASK_SIZE ||
43044- TASK_SIZE - elf_ppnt->p_memsz < k) {
43045+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43046+ elf_ppnt->p_memsz > pax_task_size ||
43047+ pax_task_size - elf_ppnt->p_memsz < k) {
43048 /* set_brk can never work. Avoid overflows. */
43049 send_sig(SIGKILL, current, 0);
43050 retval = -EINVAL;
43051@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
43052 start_data += load_bias;
43053 end_data += load_bias;
43054
43055+#ifdef CONFIG_PAX_RANDMMAP
43056+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
43057+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
43058+#endif
43059+
43060 /* Calling set_brk effectively mmaps the pages that we need
43061 * for the bss and break sections. We must do this before
43062 * mapping in the interpreter, to make sure it doesn't wind
43063@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
43064 goto out_free_dentry;
43065 }
43066 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43067- send_sig(SIGSEGV, current, 0);
43068- retval = -EFAULT; /* Nobody gets to see this, but.. */
43069- goto out_free_dentry;
43070+ /*
43071+ * This bss-zeroing can fail if the ELF
43072+ * file specifies odd protections. So
43073+ * we don't check the return value
43074+ */
43075 }
43076
43077 if (elf_interpreter) {
43078@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
43079 unsigned long n = off;
43080 if (n > PAGE_SIZE)
43081 n = PAGE_SIZE;
43082- if (!dump_write(file, buf, n))
43083+ if (!dump_write(file, buf, n)) {
43084+ free_page((unsigned long)buf);
43085 return 0;
43086+ }
43087 off -= n;
43088 }
43089 free_page((unsigned long)buf);
43090@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
43091 * Decide what to dump of a segment, part, all or none.
43092 */
43093 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43094- unsigned long mm_flags)
43095+ unsigned long mm_flags, long signr)
43096 {
43097 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43098
43099@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
43100 if (vma->vm_file == NULL)
43101 return 0;
43102
43103- if (FILTER(MAPPED_PRIVATE))
43104+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43105 goto whole;
43106
43107 /*
43108@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
43109 #undef DUMP_WRITE
43110
43111 #define DUMP_WRITE(addr, nr) \
43112+ do { \
43113+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
43114 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
43115- goto end_coredump;
43116+ goto end_coredump; \
43117+ } while (0);
43118
43119 static void fill_elf_header(struct elfhdr *elf, int segs,
43120 u16 machine, u32 flags, u8 osabi)
43121@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
43122 {
43123 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43124 int i = 0;
43125- do
43126+ do {
43127 i += 2;
43128- while (auxv[i - 2] != AT_NULL);
43129+ } while (auxv[i - 2] != AT_NULL);
43130 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43131 }
43132
43133@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
43134 phdr.p_offset = offset;
43135 phdr.p_vaddr = vma->vm_start;
43136 phdr.p_paddr = 0;
43137- phdr.p_filesz = vma_dump_size(vma, mm_flags);
43138+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
43139 phdr.p_memsz = vma->vm_end - vma->vm_start;
43140 offset += phdr.p_filesz;
43141 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43142@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
43143 unsigned long addr;
43144 unsigned long end;
43145
43146- end = vma->vm_start + vma_dump_size(vma, mm_flags);
43147+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
43148
43149 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43150 struct page *page;
43151@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
43152 page = get_dump_page(addr);
43153 if (page) {
43154 void *kaddr = kmap(page);
43155+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43156 stop = ((size += PAGE_SIZE) > limit) ||
43157 !dump_write(file, kaddr, PAGE_SIZE);
43158 kunmap(page);
43159@@ -2042,6 +2356,97 @@ out:
43160
43161 #endif /* USE_ELF_CORE_DUMP */
43162
43163+#ifdef CONFIG_PAX_MPROTECT
43164+/* PaX: non-PIC ELF libraries need relocations on their executable segments
43165+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43166+ * we'll remove VM_MAYWRITE for good on RELRO segments.
43167+ *
43168+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43169+ * basis because we want to allow the common case and not the special ones.
43170+ */
43171+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43172+{
43173+ struct elfhdr elf_h;
43174+ struct elf_phdr elf_p;
43175+ unsigned long i;
43176+ unsigned long oldflags;
43177+ bool is_textrel_rw, is_textrel_rx, is_relro;
43178+
43179+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43180+ return;
43181+
43182+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43183+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43184+
43185+#ifdef CONFIG_PAX_ELFRELOCS
43186+ /* possible TEXTREL */
43187+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43188+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43189+#else
43190+ is_textrel_rw = false;
43191+ is_textrel_rx = false;
43192+#endif
43193+
43194+ /* possible RELRO */
43195+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43196+
43197+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43198+ return;
43199+
43200+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43201+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43202+
43203+#ifdef CONFIG_PAX_ETEXECRELOCS
43204+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43205+#else
43206+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43207+#endif
43208+
43209+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43210+ !elf_check_arch(&elf_h) ||
43211+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43212+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43213+ return;
43214+
43215+ for (i = 0UL; i < elf_h.e_phnum; i++) {
43216+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43217+ return;
43218+ switch (elf_p.p_type) {
43219+ case PT_DYNAMIC:
43220+ if (!is_textrel_rw && !is_textrel_rx)
43221+ continue;
43222+ i = 0UL;
43223+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43224+ elf_dyn dyn;
43225+
43226+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43227+ return;
43228+ if (dyn.d_tag == DT_NULL)
43229+ return;
43230+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43231+ gr_log_textrel(vma);
43232+ if (is_textrel_rw)
43233+ vma->vm_flags |= VM_MAYWRITE;
43234+ else
43235+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43236+ vma->vm_flags &= ~VM_MAYWRITE;
43237+ return;
43238+ }
43239+ i++;
43240+ }
43241+ return;
43242+
43243+ case PT_GNU_RELRO:
43244+ if (!is_relro)
43245+ continue;
43246+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43247+ vma->vm_flags &= ~VM_MAYWRITE;
43248+ return;
43249+ }
43250+ }
43251+}
43252+#endif
43253+
43254 static int __init init_elf_binfmt(void)
43255 {
43256 return register_binfmt(&elf_format);
43257diff -urNp linux-2.6.32.48/fs/binfmt_flat.c linux-2.6.32.48/fs/binfmt_flat.c
43258--- linux-2.6.32.48/fs/binfmt_flat.c 2011-11-08 19:02:43.000000000 -0500
43259+++ linux-2.6.32.48/fs/binfmt_flat.c 2011-11-15 19:59:43.000000000 -0500
43260@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
43261 realdatastart = (unsigned long) -ENOMEM;
43262 printk("Unable to allocate RAM for process data, errno %d\n",
43263 (int)-realdatastart);
43264+ down_write(&current->mm->mmap_sem);
43265 do_munmap(current->mm, textpos, text_len);
43266+ up_write(&current->mm->mmap_sem);
43267 ret = realdatastart;
43268 goto err;
43269 }
43270@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
43271 }
43272 if (IS_ERR_VALUE(result)) {
43273 printk("Unable to read data+bss, errno %d\n", (int)-result);
43274+ down_write(&current->mm->mmap_sem);
43275 do_munmap(current->mm, textpos, text_len);
43276 do_munmap(current->mm, realdatastart, data_len + extra);
43277+ up_write(&current->mm->mmap_sem);
43278 ret = result;
43279 goto err;
43280 }
43281@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
43282 }
43283 if (IS_ERR_VALUE(result)) {
43284 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43285+ down_write(&current->mm->mmap_sem);
43286 do_munmap(current->mm, textpos, text_len + data_len + extra +
43287 MAX_SHARED_LIBS * sizeof(unsigned long));
43288+ up_write(&current->mm->mmap_sem);
43289 ret = result;
43290 goto err;
43291 }
43292diff -urNp linux-2.6.32.48/fs/bio.c linux-2.6.32.48/fs/bio.c
43293--- linux-2.6.32.48/fs/bio.c 2011-11-08 19:02:43.000000000 -0500
43294+++ linux-2.6.32.48/fs/bio.c 2011-11-15 19:59:43.000000000 -0500
43295@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
43296
43297 i = 0;
43298 while (i < bio_slab_nr) {
43299- struct bio_slab *bslab = &bio_slabs[i];
43300+ bslab = &bio_slabs[i];
43301
43302 if (!bslab->slab && entry == -1)
43303 entry = i;
43304@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
43305 const int read = bio_data_dir(bio) == READ;
43306 struct bio_map_data *bmd = bio->bi_private;
43307 int i;
43308- char *p = bmd->sgvecs[0].iov_base;
43309+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43310
43311 __bio_for_each_segment(bvec, bio, i, 0) {
43312 char *addr = page_address(bvec->bv_page);
43313diff -urNp linux-2.6.32.48/fs/block_dev.c linux-2.6.32.48/fs/block_dev.c
43314--- linux-2.6.32.48/fs/block_dev.c 2011-11-08 19:02:43.000000000 -0500
43315+++ linux-2.6.32.48/fs/block_dev.c 2011-11-15 19:59:43.000000000 -0500
43316@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
43317 else if (bdev->bd_contains == bdev)
43318 res = 0; /* is a whole device which isn't held */
43319
43320- else if (bdev->bd_contains->bd_holder == bd_claim)
43321+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
43322 res = 0; /* is a partition of a device that is being partitioned */
43323 else if (bdev->bd_contains->bd_holder != NULL)
43324 res = -EBUSY; /* is a partition of a held device */
43325diff -urNp linux-2.6.32.48/fs/btrfs/ctree.c linux-2.6.32.48/fs/btrfs/ctree.c
43326--- linux-2.6.32.48/fs/btrfs/ctree.c 2011-11-08 19:02:43.000000000 -0500
43327+++ linux-2.6.32.48/fs/btrfs/ctree.c 2011-11-15 19:59:43.000000000 -0500
43328@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
43329 free_extent_buffer(buf);
43330 add_root_to_dirty_list(root);
43331 } else {
43332- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43333- parent_start = parent->start;
43334- else
43335+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43336+ if (parent)
43337+ parent_start = parent->start;
43338+ else
43339+ parent_start = 0;
43340+ } else
43341 parent_start = 0;
43342
43343 WARN_ON(trans->transid != btrfs_header_generation(parent));
43344@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
43345
43346 ret = 0;
43347 if (slot == 0) {
43348- struct btrfs_disk_key disk_key;
43349 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
43350 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
43351 }
43352diff -urNp linux-2.6.32.48/fs/btrfs/disk-io.c linux-2.6.32.48/fs/btrfs/disk-io.c
43353--- linux-2.6.32.48/fs/btrfs/disk-io.c 2011-11-08 19:02:43.000000000 -0500
43354+++ linux-2.6.32.48/fs/btrfs/disk-io.c 2011-11-15 19:59:43.000000000 -0500
43355@@ -39,7 +39,7 @@
43356 #include "tree-log.h"
43357 #include "free-space-cache.h"
43358
43359-static struct extent_io_ops btree_extent_io_ops;
43360+static const struct extent_io_ops btree_extent_io_ops;
43361 static void end_workqueue_fn(struct btrfs_work *work);
43362 static void free_fs_root(struct btrfs_root *root);
43363
43364@@ -2607,7 +2607,7 @@ out:
43365 return 0;
43366 }
43367
43368-static struct extent_io_ops btree_extent_io_ops = {
43369+static const struct extent_io_ops btree_extent_io_ops = {
43370 .write_cache_pages_lock_hook = btree_lock_page_hook,
43371 .readpage_end_io_hook = btree_readpage_end_io_hook,
43372 .submit_bio_hook = btree_submit_bio_hook,
43373diff -urNp linux-2.6.32.48/fs/btrfs/extent_io.h linux-2.6.32.48/fs/btrfs/extent_io.h
43374--- linux-2.6.32.48/fs/btrfs/extent_io.h 2011-11-08 19:02:43.000000000 -0500
43375+++ linux-2.6.32.48/fs/btrfs/extent_io.h 2011-11-15 19:59:43.000000000 -0500
43376@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
43377 struct bio *bio, int mirror_num,
43378 unsigned long bio_flags);
43379 struct extent_io_ops {
43380- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
43381+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
43382 u64 start, u64 end, int *page_started,
43383 unsigned long *nr_written);
43384- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
43385- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
43386+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
43387+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
43388 extent_submit_bio_hook_t *submit_bio_hook;
43389- int (*merge_bio_hook)(struct page *page, unsigned long offset,
43390+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
43391 size_t size, struct bio *bio,
43392 unsigned long bio_flags);
43393- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
43394- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
43395+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
43396+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
43397 u64 start, u64 end,
43398 struct extent_state *state);
43399- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
43400+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
43401 u64 start, u64 end,
43402 struct extent_state *state);
43403- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
43404+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
43405 struct extent_state *state);
43406- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
43407+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
43408 struct extent_state *state, int uptodate);
43409- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
43410+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
43411 unsigned long old, unsigned long bits);
43412- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
43413+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
43414 unsigned long bits);
43415- int (*merge_extent_hook)(struct inode *inode,
43416+ int (* const merge_extent_hook)(struct inode *inode,
43417 struct extent_state *new,
43418 struct extent_state *other);
43419- int (*split_extent_hook)(struct inode *inode,
43420+ int (* const split_extent_hook)(struct inode *inode,
43421 struct extent_state *orig, u64 split);
43422- int (*write_cache_pages_lock_hook)(struct page *page);
43423+ int (* const write_cache_pages_lock_hook)(struct page *page);
43424 };
43425
43426 struct extent_io_tree {
43427@@ -88,7 +88,7 @@ struct extent_io_tree {
43428 u64 dirty_bytes;
43429 spinlock_t lock;
43430 spinlock_t buffer_lock;
43431- struct extent_io_ops *ops;
43432+ const struct extent_io_ops *ops;
43433 };
43434
43435 struct extent_state {
43436diff -urNp linux-2.6.32.48/fs/btrfs/extent-tree.c linux-2.6.32.48/fs/btrfs/extent-tree.c
43437--- linux-2.6.32.48/fs/btrfs/extent-tree.c 2011-11-08 19:02:43.000000000 -0500
43438+++ linux-2.6.32.48/fs/btrfs/extent-tree.c 2011-11-15 19:59:43.000000000 -0500
43439@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
43440 u64 group_start = group->key.objectid;
43441 new_extents = kmalloc(sizeof(*new_extents),
43442 GFP_NOFS);
43443+ if (!new_extents) {
43444+ ret = -ENOMEM;
43445+ goto out;
43446+ }
43447 nr_extents = 1;
43448 ret = get_new_locations(reloc_inode,
43449 extent_key,
43450diff -urNp linux-2.6.32.48/fs/btrfs/free-space-cache.c linux-2.6.32.48/fs/btrfs/free-space-cache.c
43451--- linux-2.6.32.48/fs/btrfs/free-space-cache.c 2011-11-08 19:02:43.000000000 -0500
43452+++ linux-2.6.32.48/fs/btrfs/free-space-cache.c 2011-11-15 19:59:43.000000000 -0500
43453@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
43454
43455 while(1) {
43456 if (entry->bytes < bytes || entry->offset < min_start) {
43457- struct rb_node *node;
43458-
43459 node = rb_next(&entry->offset_index);
43460 if (!node)
43461 break;
43462@@ -1226,7 +1224,7 @@ again:
43463 */
43464 while (entry->bitmap || found_bitmap ||
43465 (!entry->bitmap && entry->bytes < min_bytes)) {
43466- struct rb_node *node = rb_next(&entry->offset_index);
43467+ node = rb_next(&entry->offset_index);
43468
43469 if (entry->bitmap && entry->bytes > bytes + empty_size) {
43470 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
43471diff -urNp linux-2.6.32.48/fs/btrfs/inode.c linux-2.6.32.48/fs/btrfs/inode.c
43472--- linux-2.6.32.48/fs/btrfs/inode.c 2011-11-08 19:02:43.000000000 -0500
43473+++ linux-2.6.32.48/fs/btrfs/inode.c 2011-11-15 19:59:43.000000000 -0500
43474@@ -63,7 +63,7 @@ static const struct inode_operations btr
43475 static const struct address_space_operations btrfs_aops;
43476 static const struct address_space_operations btrfs_symlink_aops;
43477 static const struct file_operations btrfs_dir_file_operations;
43478-static struct extent_io_ops btrfs_extent_io_ops;
43479+static const struct extent_io_ops btrfs_extent_io_ops;
43480
43481 static struct kmem_cache *btrfs_inode_cachep;
43482 struct kmem_cache *btrfs_trans_handle_cachep;
43483@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
43484 1, 0, NULL, GFP_NOFS);
43485 while (start < end) {
43486 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
43487+ BUG_ON(!async_cow);
43488 async_cow->inode = inode;
43489 async_cow->root = root;
43490 async_cow->locked_page = locked_page;
43491@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
43492 inline_size = btrfs_file_extent_inline_item_len(leaf,
43493 btrfs_item_nr(leaf, path->slots[0]));
43494 tmp = kmalloc(inline_size, GFP_NOFS);
43495+ if (!tmp)
43496+ return -ENOMEM;
43497 ptr = btrfs_file_extent_inline_start(item);
43498
43499 read_extent_buffer(leaf, tmp, ptr, inline_size);
43500@@ -5410,7 +5413,7 @@ fail:
43501 return -ENOMEM;
43502 }
43503
43504-static int btrfs_getattr(struct vfsmount *mnt,
43505+int btrfs_getattr(struct vfsmount *mnt,
43506 struct dentry *dentry, struct kstat *stat)
43507 {
43508 struct inode *inode = dentry->d_inode;
43509@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
43510 return 0;
43511 }
43512
43513+EXPORT_SYMBOL(btrfs_getattr);
43514+
43515+dev_t get_btrfs_dev_from_inode(struct inode *inode)
43516+{
43517+ return BTRFS_I(inode)->root->anon_super.s_dev;
43518+}
43519+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43520+
43521 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
43522 struct inode *new_dir, struct dentry *new_dentry)
43523 {
43524@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
43525 .fsync = btrfs_sync_file,
43526 };
43527
43528-static struct extent_io_ops btrfs_extent_io_ops = {
43529+static const struct extent_io_ops btrfs_extent_io_ops = {
43530 .fill_delalloc = run_delalloc_range,
43531 .submit_bio_hook = btrfs_submit_bio_hook,
43532 .merge_bio_hook = btrfs_merge_bio_hook,
43533diff -urNp linux-2.6.32.48/fs/btrfs/relocation.c linux-2.6.32.48/fs/btrfs/relocation.c
43534--- linux-2.6.32.48/fs/btrfs/relocation.c 2011-11-08 19:02:43.000000000 -0500
43535+++ linux-2.6.32.48/fs/btrfs/relocation.c 2011-11-15 19:59:43.000000000 -0500
43536@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
43537 }
43538 spin_unlock(&rc->reloc_root_tree.lock);
43539
43540- BUG_ON((struct btrfs_root *)node->data != root);
43541+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43542
43543 if (!del) {
43544 spin_lock(&rc->reloc_root_tree.lock);
43545diff -urNp linux-2.6.32.48/fs/btrfs/sysfs.c linux-2.6.32.48/fs/btrfs/sysfs.c
43546--- linux-2.6.32.48/fs/btrfs/sysfs.c 2011-11-08 19:02:43.000000000 -0500
43547+++ linux-2.6.32.48/fs/btrfs/sysfs.c 2011-11-15 19:59:43.000000000 -0500
43548@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
43549 complete(&root->kobj_unregister);
43550 }
43551
43552-static struct sysfs_ops btrfs_super_attr_ops = {
43553+static const struct sysfs_ops btrfs_super_attr_ops = {
43554 .show = btrfs_super_attr_show,
43555 .store = btrfs_super_attr_store,
43556 };
43557
43558-static struct sysfs_ops btrfs_root_attr_ops = {
43559+static const struct sysfs_ops btrfs_root_attr_ops = {
43560 .show = btrfs_root_attr_show,
43561 .store = btrfs_root_attr_store,
43562 };
43563diff -urNp linux-2.6.32.48/fs/buffer.c linux-2.6.32.48/fs/buffer.c
43564--- linux-2.6.32.48/fs/buffer.c 2011-11-08 19:02:43.000000000 -0500
43565+++ linux-2.6.32.48/fs/buffer.c 2011-11-15 19:59:43.000000000 -0500
43566@@ -25,6 +25,7 @@
43567 #include <linux/percpu.h>
43568 #include <linux/slab.h>
43569 #include <linux/capability.h>
43570+#include <linux/security.h>
43571 #include <linux/blkdev.h>
43572 #include <linux/file.h>
43573 #include <linux/quotaops.h>
43574diff -urNp linux-2.6.32.48/fs/cachefiles/bind.c linux-2.6.32.48/fs/cachefiles/bind.c
43575--- linux-2.6.32.48/fs/cachefiles/bind.c 2011-11-08 19:02:43.000000000 -0500
43576+++ linux-2.6.32.48/fs/cachefiles/bind.c 2011-11-15 19:59:43.000000000 -0500
43577@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
43578 args);
43579
43580 /* start by checking things over */
43581- ASSERT(cache->fstop_percent >= 0 &&
43582- cache->fstop_percent < cache->fcull_percent &&
43583+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43584 cache->fcull_percent < cache->frun_percent &&
43585 cache->frun_percent < 100);
43586
43587- ASSERT(cache->bstop_percent >= 0 &&
43588- cache->bstop_percent < cache->bcull_percent &&
43589+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43590 cache->bcull_percent < cache->brun_percent &&
43591 cache->brun_percent < 100);
43592
43593diff -urNp linux-2.6.32.48/fs/cachefiles/daemon.c linux-2.6.32.48/fs/cachefiles/daemon.c
43594--- linux-2.6.32.48/fs/cachefiles/daemon.c 2011-11-08 19:02:43.000000000 -0500
43595+++ linux-2.6.32.48/fs/cachefiles/daemon.c 2011-11-15 19:59:43.000000000 -0500
43596@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
43597 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43598 return -EIO;
43599
43600- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43601+ if (datalen > PAGE_SIZE - 1)
43602 return -EOPNOTSUPP;
43603
43604 /* drag the command string into the kernel so we can parse it */
43605@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
43606 if (args[0] != '%' || args[1] != '\0')
43607 return -EINVAL;
43608
43609- if (fstop < 0 || fstop >= cache->fcull_percent)
43610+ if (fstop >= cache->fcull_percent)
43611 return cachefiles_daemon_range_error(cache, args);
43612
43613 cache->fstop_percent = fstop;
43614@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
43615 if (args[0] != '%' || args[1] != '\0')
43616 return -EINVAL;
43617
43618- if (bstop < 0 || bstop >= cache->bcull_percent)
43619+ if (bstop >= cache->bcull_percent)
43620 return cachefiles_daemon_range_error(cache, args);
43621
43622 cache->bstop_percent = bstop;
43623diff -urNp linux-2.6.32.48/fs/cachefiles/internal.h linux-2.6.32.48/fs/cachefiles/internal.h
43624--- linux-2.6.32.48/fs/cachefiles/internal.h 2011-11-08 19:02:43.000000000 -0500
43625+++ linux-2.6.32.48/fs/cachefiles/internal.h 2011-11-15 19:59:43.000000000 -0500
43626@@ -56,7 +56,7 @@ struct cachefiles_cache {
43627 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43628 struct rb_root active_nodes; /* active nodes (can't be culled) */
43629 rwlock_t active_lock; /* lock for active_nodes */
43630- atomic_t gravecounter; /* graveyard uniquifier */
43631+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43632 unsigned frun_percent; /* when to stop culling (% files) */
43633 unsigned fcull_percent; /* when to start culling (% files) */
43634 unsigned fstop_percent; /* when to stop allocating (% files) */
43635@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
43636 * proc.c
43637 */
43638 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43639-extern atomic_t cachefiles_lookup_histogram[HZ];
43640-extern atomic_t cachefiles_mkdir_histogram[HZ];
43641-extern atomic_t cachefiles_create_histogram[HZ];
43642+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43643+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43644+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43645
43646 extern int __init cachefiles_proc_init(void);
43647 extern void cachefiles_proc_cleanup(void);
43648 static inline
43649-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43650+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43651 {
43652 unsigned long jif = jiffies - start_jif;
43653 if (jif >= HZ)
43654 jif = HZ - 1;
43655- atomic_inc(&histogram[jif]);
43656+ atomic_inc_unchecked(&histogram[jif]);
43657 }
43658
43659 #else
43660diff -urNp linux-2.6.32.48/fs/cachefiles/namei.c linux-2.6.32.48/fs/cachefiles/namei.c
43661--- linux-2.6.32.48/fs/cachefiles/namei.c 2011-11-08 19:02:43.000000000 -0500
43662+++ linux-2.6.32.48/fs/cachefiles/namei.c 2011-11-15 19:59:43.000000000 -0500
43663@@ -250,7 +250,7 @@ try_again:
43664 /* first step is to make up a grave dentry in the graveyard */
43665 sprintf(nbuffer, "%08x%08x",
43666 (uint32_t) get_seconds(),
43667- (uint32_t) atomic_inc_return(&cache->gravecounter));
43668+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43669
43670 /* do the multiway lock magic */
43671 trap = lock_rename(cache->graveyard, dir);
43672diff -urNp linux-2.6.32.48/fs/cachefiles/proc.c linux-2.6.32.48/fs/cachefiles/proc.c
43673--- linux-2.6.32.48/fs/cachefiles/proc.c 2011-11-08 19:02:43.000000000 -0500
43674+++ linux-2.6.32.48/fs/cachefiles/proc.c 2011-11-15 19:59:43.000000000 -0500
43675@@ -14,9 +14,9 @@
43676 #include <linux/seq_file.h>
43677 #include "internal.h"
43678
43679-atomic_t cachefiles_lookup_histogram[HZ];
43680-atomic_t cachefiles_mkdir_histogram[HZ];
43681-atomic_t cachefiles_create_histogram[HZ];
43682+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43683+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43684+atomic_unchecked_t cachefiles_create_histogram[HZ];
43685
43686 /*
43687 * display the latency histogram
43688@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
43689 return 0;
43690 default:
43691 index = (unsigned long) v - 3;
43692- x = atomic_read(&cachefiles_lookup_histogram[index]);
43693- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43694- z = atomic_read(&cachefiles_create_histogram[index]);
43695+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43696+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43697+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43698 if (x == 0 && y == 0 && z == 0)
43699 return 0;
43700
43701diff -urNp linux-2.6.32.48/fs/cachefiles/rdwr.c linux-2.6.32.48/fs/cachefiles/rdwr.c
43702--- linux-2.6.32.48/fs/cachefiles/rdwr.c 2011-11-08 19:02:43.000000000 -0500
43703+++ linux-2.6.32.48/fs/cachefiles/rdwr.c 2011-11-15 19:59:43.000000000 -0500
43704@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
43705 old_fs = get_fs();
43706 set_fs(KERNEL_DS);
43707 ret = file->f_op->write(
43708- file, (const void __user *) data, len, &pos);
43709+ file, (const void __force_user *) data, len, &pos);
43710 set_fs(old_fs);
43711 kunmap(page);
43712 if (ret != len)
43713diff -urNp linux-2.6.32.48/fs/cifs/cifs_debug.c linux-2.6.32.48/fs/cifs/cifs_debug.c
43714--- linux-2.6.32.48/fs/cifs/cifs_debug.c 2011-11-08 19:02:43.000000000 -0500
43715+++ linux-2.6.32.48/fs/cifs/cifs_debug.c 2011-11-15 19:59:43.000000000 -0500
43716@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
43717 tcon = list_entry(tmp3,
43718 struct cifsTconInfo,
43719 tcon_list);
43720- atomic_set(&tcon->num_smbs_sent, 0);
43721- atomic_set(&tcon->num_writes, 0);
43722- atomic_set(&tcon->num_reads, 0);
43723- atomic_set(&tcon->num_oplock_brks, 0);
43724- atomic_set(&tcon->num_opens, 0);
43725- atomic_set(&tcon->num_posixopens, 0);
43726- atomic_set(&tcon->num_posixmkdirs, 0);
43727- atomic_set(&tcon->num_closes, 0);
43728- atomic_set(&tcon->num_deletes, 0);
43729- atomic_set(&tcon->num_mkdirs, 0);
43730- atomic_set(&tcon->num_rmdirs, 0);
43731- atomic_set(&tcon->num_renames, 0);
43732- atomic_set(&tcon->num_t2renames, 0);
43733- atomic_set(&tcon->num_ffirst, 0);
43734- atomic_set(&tcon->num_fnext, 0);
43735- atomic_set(&tcon->num_fclose, 0);
43736- atomic_set(&tcon->num_hardlinks, 0);
43737- atomic_set(&tcon->num_symlinks, 0);
43738- atomic_set(&tcon->num_locks, 0);
43739+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43740+ atomic_set_unchecked(&tcon->num_writes, 0);
43741+ atomic_set_unchecked(&tcon->num_reads, 0);
43742+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43743+ atomic_set_unchecked(&tcon->num_opens, 0);
43744+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43745+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43746+ atomic_set_unchecked(&tcon->num_closes, 0);
43747+ atomic_set_unchecked(&tcon->num_deletes, 0);
43748+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43749+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43750+ atomic_set_unchecked(&tcon->num_renames, 0);
43751+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43752+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43753+ atomic_set_unchecked(&tcon->num_fnext, 0);
43754+ atomic_set_unchecked(&tcon->num_fclose, 0);
43755+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43756+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43757+ atomic_set_unchecked(&tcon->num_locks, 0);
43758 }
43759 }
43760 }
43761@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
43762 if (tcon->need_reconnect)
43763 seq_puts(m, "\tDISCONNECTED ");
43764 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43765- atomic_read(&tcon->num_smbs_sent),
43766- atomic_read(&tcon->num_oplock_brks));
43767+ atomic_read_unchecked(&tcon->num_smbs_sent),
43768+ atomic_read_unchecked(&tcon->num_oplock_brks));
43769 seq_printf(m, "\nReads: %d Bytes: %lld",
43770- atomic_read(&tcon->num_reads),
43771+ atomic_read_unchecked(&tcon->num_reads),
43772 (long long)(tcon->bytes_read));
43773 seq_printf(m, "\nWrites: %d Bytes: %lld",
43774- atomic_read(&tcon->num_writes),
43775+ atomic_read_unchecked(&tcon->num_writes),
43776 (long long)(tcon->bytes_written));
43777 seq_printf(m, "\nFlushes: %d",
43778- atomic_read(&tcon->num_flushes));
43779+ atomic_read_unchecked(&tcon->num_flushes));
43780 seq_printf(m, "\nLocks: %d HardLinks: %d "
43781 "Symlinks: %d",
43782- atomic_read(&tcon->num_locks),
43783- atomic_read(&tcon->num_hardlinks),
43784- atomic_read(&tcon->num_symlinks));
43785+ atomic_read_unchecked(&tcon->num_locks),
43786+ atomic_read_unchecked(&tcon->num_hardlinks),
43787+ atomic_read_unchecked(&tcon->num_symlinks));
43788 seq_printf(m, "\nOpens: %d Closes: %d "
43789 "Deletes: %d",
43790- atomic_read(&tcon->num_opens),
43791- atomic_read(&tcon->num_closes),
43792- atomic_read(&tcon->num_deletes));
43793+ atomic_read_unchecked(&tcon->num_opens),
43794+ atomic_read_unchecked(&tcon->num_closes),
43795+ atomic_read_unchecked(&tcon->num_deletes));
43796 seq_printf(m, "\nPosix Opens: %d "
43797 "Posix Mkdirs: %d",
43798- atomic_read(&tcon->num_posixopens),
43799- atomic_read(&tcon->num_posixmkdirs));
43800+ atomic_read_unchecked(&tcon->num_posixopens),
43801+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43802 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43803- atomic_read(&tcon->num_mkdirs),
43804- atomic_read(&tcon->num_rmdirs));
43805+ atomic_read_unchecked(&tcon->num_mkdirs),
43806+ atomic_read_unchecked(&tcon->num_rmdirs));
43807 seq_printf(m, "\nRenames: %d T2 Renames %d",
43808- atomic_read(&tcon->num_renames),
43809- atomic_read(&tcon->num_t2renames));
43810+ atomic_read_unchecked(&tcon->num_renames),
43811+ atomic_read_unchecked(&tcon->num_t2renames));
43812 seq_printf(m, "\nFindFirst: %d FNext %d "
43813 "FClose %d",
43814- atomic_read(&tcon->num_ffirst),
43815- atomic_read(&tcon->num_fnext),
43816- atomic_read(&tcon->num_fclose));
43817+ atomic_read_unchecked(&tcon->num_ffirst),
43818+ atomic_read_unchecked(&tcon->num_fnext),
43819+ atomic_read_unchecked(&tcon->num_fclose));
43820 }
43821 }
43822 }
43823diff -urNp linux-2.6.32.48/fs/cifs/cifsfs.c linux-2.6.32.48/fs/cifs/cifsfs.c
43824--- linux-2.6.32.48/fs/cifs/cifsfs.c 2011-11-08 19:02:43.000000000 -0500
43825+++ linux-2.6.32.48/fs/cifs/cifsfs.c 2011-11-15 19:59:43.000000000 -0500
43826@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
43827 cifs_req_cachep = kmem_cache_create("cifs_request",
43828 CIFSMaxBufSize +
43829 MAX_CIFS_HDR_SIZE, 0,
43830- SLAB_HWCACHE_ALIGN, NULL);
43831+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43832 if (cifs_req_cachep == NULL)
43833 return -ENOMEM;
43834
43835@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
43836 efficient to alloc 1 per page off the slab compared to 17K (5page)
43837 alloc of large cifs buffers even when page debugging is on */
43838 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43839- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43840+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43841 NULL);
43842 if (cifs_sm_req_cachep == NULL) {
43843 mempool_destroy(cifs_req_poolp);
43844@@ -991,8 +991,8 @@ init_cifs(void)
43845 atomic_set(&bufAllocCount, 0);
43846 atomic_set(&smBufAllocCount, 0);
43847 #ifdef CONFIG_CIFS_STATS2
43848- atomic_set(&totBufAllocCount, 0);
43849- atomic_set(&totSmBufAllocCount, 0);
43850+ atomic_set_unchecked(&totBufAllocCount, 0);
43851+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43852 #endif /* CONFIG_CIFS_STATS2 */
43853
43854 atomic_set(&midCount, 0);
43855diff -urNp linux-2.6.32.48/fs/cifs/cifsglob.h linux-2.6.32.48/fs/cifs/cifsglob.h
43856--- linux-2.6.32.48/fs/cifs/cifsglob.h 2011-11-08 19:02:43.000000000 -0500
43857+++ linux-2.6.32.48/fs/cifs/cifsglob.h 2011-11-15 19:59:43.000000000 -0500
43858@@ -252,28 +252,28 @@ struct cifsTconInfo {
43859 __u16 Flags; /* optional support bits */
43860 enum statusEnum tidStatus;
43861 #ifdef CONFIG_CIFS_STATS
43862- atomic_t num_smbs_sent;
43863- atomic_t num_writes;
43864- atomic_t num_reads;
43865- atomic_t num_flushes;
43866- atomic_t num_oplock_brks;
43867- atomic_t num_opens;
43868- atomic_t num_closes;
43869- atomic_t num_deletes;
43870- atomic_t num_mkdirs;
43871- atomic_t num_posixopens;
43872- atomic_t num_posixmkdirs;
43873- atomic_t num_rmdirs;
43874- atomic_t num_renames;
43875- atomic_t num_t2renames;
43876- atomic_t num_ffirst;
43877- atomic_t num_fnext;
43878- atomic_t num_fclose;
43879- atomic_t num_hardlinks;
43880- atomic_t num_symlinks;
43881- atomic_t num_locks;
43882- atomic_t num_acl_get;
43883- atomic_t num_acl_set;
43884+ atomic_unchecked_t num_smbs_sent;
43885+ atomic_unchecked_t num_writes;
43886+ atomic_unchecked_t num_reads;
43887+ atomic_unchecked_t num_flushes;
43888+ atomic_unchecked_t num_oplock_brks;
43889+ atomic_unchecked_t num_opens;
43890+ atomic_unchecked_t num_closes;
43891+ atomic_unchecked_t num_deletes;
43892+ atomic_unchecked_t num_mkdirs;
43893+ atomic_unchecked_t num_posixopens;
43894+ atomic_unchecked_t num_posixmkdirs;
43895+ atomic_unchecked_t num_rmdirs;
43896+ atomic_unchecked_t num_renames;
43897+ atomic_unchecked_t num_t2renames;
43898+ atomic_unchecked_t num_ffirst;
43899+ atomic_unchecked_t num_fnext;
43900+ atomic_unchecked_t num_fclose;
43901+ atomic_unchecked_t num_hardlinks;
43902+ atomic_unchecked_t num_symlinks;
43903+ atomic_unchecked_t num_locks;
43904+ atomic_unchecked_t num_acl_get;
43905+ atomic_unchecked_t num_acl_set;
43906 #ifdef CONFIG_CIFS_STATS2
43907 unsigned long long time_writes;
43908 unsigned long long time_reads;
43909@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
43910 }
43911
43912 #ifdef CONFIG_CIFS_STATS
43913-#define cifs_stats_inc atomic_inc
43914+#define cifs_stats_inc atomic_inc_unchecked
43915
43916 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
43917 unsigned int bytes)
43918@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
43919 /* Various Debug counters */
43920 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43921 #ifdef CONFIG_CIFS_STATS2
43922-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43923-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43924+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43925+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43926 #endif
43927 GLOBAL_EXTERN atomic_t smBufAllocCount;
43928 GLOBAL_EXTERN atomic_t midCount;
43929diff -urNp linux-2.6.32.48/fs/cifs/link.c linux-2.6.32.48/fs/cifs/link.c
43930--- linux-2.6.32.48/fs/cifs/link.c 2011-11-08 19:02:43.000000000 -0500
43931+++ linux-2.6.32.48/fs/cifs/link.c 2011-11-15 19:59:43.000000000 -0500
43932@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
43933
43934 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43935 {
43936- char *p = nd_get_link(nd);
43937+ const char *p = nd_get_link(nd);
43938 if (!IS_ERR(p))
43939 kfree(p);
43940 }
43941diff -urNp linux-2.6.32.48/fs/cifs/misc.c linux-2.6.32.48/fs/cifs/misc.c
43942--- linux-2.6.32.48/fs/cifs/misc.c 2011-11-08 19:02:43.000000000 -0500
43943+++ linux-2.6.32.48/fs/cifs/misc.c 2011-11-15 19:59:43.000000000 -0500
43944@@ -155,7 +155,7 @@ cifs_buf_get(void)
43945 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43946 atomic_inc(&bufAllocCount);
43947 #ifdef CONFIG_CIFS_STATS2
43948- atomic_inc(&totBufAllocCount);
43949+ atomic_inc_unchecked(&totBufAllocCount);
43950 #endif /* CONFIG_CIFS_STATS2 */
43951 }
43952
43953@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
43954 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43955 atomic_inc(&smBufAllocCount);
43956 #ifdef CONFIG_CIFS_STATS2
43957- atomic_inc(&totSmBufAllocCount);
43958+ atomic_inc_unchecked(&totSmBufAllocCount);
43959 #endif /* CONFIG_CIFS_STATS2 */
43960
43961 }
43962diff -urNp linux-2.6.32.48/fs/coda/cache.c linux-2.6.32.48/fs/coda/cache.c
43963--- linux-2.6.32.48/fs/coda/cache.c 2011-11-08 19:02:43.000000000 -0500
43964+++ linux-2.6.32.48/fs/coda/cache.c 2011-11-15 19:59:43.000000000 -0500
43965@@ -24,14 +24,14 @@
43966 #include <linux/coda_fs_i.h>
43967 #include <linux/coda_cache.h>
43968
43969-static atomic_t permission_epoch = ATOMIC_INIT(0);
43970+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43971
43972 /* replace or extend an acl cache hit */
43973 void coda_cache_enter(struct inode *inode, int mask)
43974 {
43975 struct coda_inode_info *cii = ITOC(inode);
43976
43977- cii->c_cached_epoch = atomic_read(&permission_epoch);
43978+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43979 if (cii->c_uid != current_fsuid()) {
43980 cii->c_uid = current_fsuid();
43981 cii->c_cached_perm = mask;
43982@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
43983 void coda_cache_clear_inode(struct inode *inode)
43984 {
43985 struct coda_inode_info *cii = ITOC(inode);
43986- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43987+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43988 }
43989
43990 /* remove all acl caches */
43991 void coda_cache_clear_all(struct super_block *sb)
43992 {
43993- atomic_inc(&permission_epoch);
43994+ atomic_inc_unchecked(&permission_epoch);
43995 }
43996
43997
43998@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
43999
44000 hit = (mask & cii->c_cached_perm) == mask &&
44001 cii->c_uid == current_fsuid() &&
44002- cii->c_cached_epoch == atomic_read(&permission_epoch);
44003+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44004
44005 return hit;
44006 }
44007diff -urNp linux-2.6.32.48/fs/compat_binfmt_elf.c linux-2.6.32.48/fs/compat_binfmt_elf.c
44008--- linux-2.6.32.48/fs/compat_binfmt_elf.c 2011-11-08 19:02:43.000000000 -0500
44009+++ linux-2.6.32.48/fs/compat_binfmt_elf.c 2011-11-15 19:59:43.000000000 -0500
44010@@ -29,10 +29,12 @@
44011 #undef elfhdr
44012 #undef elf_phdr
44013 #undef elf_note
44014+#undef elf_dyn
44015 #undef elf_addr_t
44016 #define elfhdr elf32_hdr
44017 #define elf_phdr elf32_phdr
44018 #define elf_note elf32_note
44019+#define elf_dyn Elf32_Dyn
44020 #define elf_addr_t Elf32_Addr
44021
44022 /*
44023diff -urNp linux-2.6.32.48/fs/compat.c linux-2.6.32.48/fs/compat.c
44024--- linux-2.6.32.48/fs/compat.c 2011-11-08 19:02:43.000000000 -0500
44025+++ linux-2.6.32.48/fs/compat.c 2011-11-15 19:59:43.000000000 -0500
44026@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char _
44027 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
44028 {
44029 compat_ino_t ino = stat->ino;
44030- typeof(ubuf->st_uid) uid = 0;
44031- typeof(ubuf->st_gid) gid = 0;
44032+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
44033+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
44034 int err;
44035
44036 SET_UID(uid, stat->uid);
44037@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
44038
44039 set_fs(KERNEL_DS);
44040 /* The __user pointer cast is valid because of the set_fs() */
44041- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44042+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44043 set_fs(oldfs);
44044 /* truncating is ok because it's a user address */
44045 if (!ret)
44046@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
44047
44048 struct compat_readdir_callback {
44049 struct compat_old_linux_dirent __user *dirent;
44050+ struct file * file;
44051 int result;
44052 };
44053
44054@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
44055 buf->result = -EOVERFLOW;
44056 return -EOVERFLOW;
44057 }
44058+
44059+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44060+ return 0;
44061+
44062 buf->result++;
44063 dirent = buf->dirent;
44064 if (!access_ok(VERIFY_WRITE, dirent,
44065@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
44066
44067 buf.result = 0;
44068 buf.dirent = dirent;
44069+ buf.file = file;
44070
44071 error = vfs_readdir(file, compat_fillonedir, &buf);
44072 if (buf.result)
44073@@ -899,6 +905,7 @@ struct compat_linux_dirent {
44074 struct compat_getdents_callback {
44075 struct compat_linux_dirent __user *current_dir;
44076 struct compat_linux_dirent __user *previous;
44077+ struct file * file;
44078 int count;
44079 int error;
44080 };
44081@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
44082 buf->error = -EOVERFLOW;
44083 return -EOVERFLOW;
44084 }
44085+
44086+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44087+ return 0;
44088+
44089 dirent = buf->previous;
44090 if (dirent) {
44091 if (__put_user(offset, &dirent->d_off))
44092@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
44093 buf.previous = NULL;
44094 buf.count = count;
44095 buf.error = 0;
44096+ buf.file = file;
44097
44098 error = vfs_readdir(file, compat_filldir, &buf);
44099 if (error >= 0)
44100@@ -987,6 +999,7 @@ out:
44101 struct compat_getdents_callback64 {
44102 struct linux_dirent64 __user *current_dir;
44103 struct linux_dirent64 __user *previous;
44104+ struct file * file;
44105 int count;
44106 int error;
44107 };
44108@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
44109 buf->error = -EINVAL; /* only used if we fail.. */
44110 if (reclen > buf->count)
44111 return -EINVAL;
44112+
44113+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44114+ return 0;
44115+
44116 dirent = buf->previous;
44117
44118 if (dirent) {
44119@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(un
44120 buf.previous = NULL;
44121 buf.count = count;
44122 buf.error = 0;
44123+ buf.file = file;
44124
44125 error = vfs_readdir(file, compat_filldir64, &buf);
44126 if (error >= 0)
44127 error = buf.error;
44128 lastdirent = buf.previous;
44129 if (lastdirent) {
44130- typeof(lastdirent->d_off) d_off = file->f_pos;
44131+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44132 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44133 error = -EFAULT;
44134 else
44135@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
44136 * verify all the pointers
44137 */
44138 ret = -EINVAL;
44139- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
44140+ if (nr_segs > UIO_MAXIOV)
44141 goto out;
44142 if (!file->f_op)
44143 goto out;
44144@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
44145 compat_uptr_t __user *envp,
44146 struct pt_regs * regs)
44147 {
44148+#ifdef CONFIG_GRKERNSEC
44149+ struct file *old_exec_file;
44150+ struct acl_subject_label *old_acl;
44151+ struct rlimit old_rlim[RLIM_NLIMITS];
44152+#endif
44153 struct linux_binprm *bprm;
44154 struct file *file;
44155 struct files_struct *displaced;
44156 bool clear_in_exec;
44157 int retval;
44158+ const struct cred *cred = current_cred();
44159+
44160+ /*
44161+ * We move the actual failure in case of RLIMIT_NPROC excess from
44162+ * set*uid() to execve() because too many poorly written programs
44163+ * don't check setuid() return code. Here we additionally recheck
44164+ * whether NPROC limit is still exceeded.
44165+ */
44166+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44167+
44168+ if ((current->flags & PF_NPROC_EXCEEDED) &&
44169+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
44170+ retval = -EAGAIN;
44171+ goto out_ret;
44172+ }
44173+
44174+ /* We're below the limit (still or again), so we don't want to make
44175+ * further execve() calls fail. */
44176+ current->flags &= ~PF_NPROC_EXCEEDED;
44177
44178 retval = unshare_files(&displaced);
44179 if (retval)
44180@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
44181 bprm->filename = filename;
44182 bprm->interp = filename;
44183
44184+ if (gr_process_user_ban()) {
44185+ retval = -EPERM;
44186+ goto out_file;
44187+ }
44188+
44189+ retval = -EACCES;
44190+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
44191+ goto out_file;
44192+
44193 retval = bprm_mm_init(bprm);
44194 if (retval)
44195 goto out_file;
44196@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
44197 if (retval < 0)
44198 goto out;
44199
44200+ if (!gr_tpe_allow(file)) {
44201+ retval = -EACCES;
44202+ goto out;
44203+ }
44204+
44205+ if (gr_check_crash_exec(file)) {
44206+ retval = -EACCES;
44207+ goto out;
44208+ }
44209+
44210+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44211+
44212+ gr_handle_exec_args_compat(bprm, argv);
44213+
44214+#ifdef CONFIG_GRKERNSEC
44215+ old_acl = current->acl;
44216+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44217+ old_exec_file = current->exec_file;
44218+ get_file(file);
44219+ current->exec_file = file;
44220+#endif
44221+
44222+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44223+ bprm->unsafe & LSM_UNSAFE_SHARE);
44224+ if (retval < 0)
44225+ goto out_fail;
44226+
44227 retval = search_binary_handler(bprm, regs);
44228 if (retval < 0)
44229- goto out;
44230+ goto out_fail;
44231+#ifdef CONFIG_GRKERNSEC
44232+ if (old_exec_file)
44233+ fput(old_exec_file);
44234+#endif
44235
44236 /* execve succeeded */
44237 current->fs->in_exec = 0;
44238@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
44239 put_files_struct(displaced);
44240 return retval;
44241
44242+out_fail:
44243+#ifdef CONFIG_GRKERNSEC
44244+ current->acl = old_acl;
44245+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44246+ fput(current->exec_file);
44247+ current->exec_file = old_exec_file;
44248+#endif
44249+
44250 out:
44251 if (bprm->mm) {
44252 acct_arg_size(bprm, 0);
44253@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
44254 struct fdtable *fdt;
44255 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
44256
44257+ pax_track_stack();
44258+
44259 if (n < 0)
44260 goto out_nofds;
44261
44262@@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(in
44263 oldfs = get_fs();
44264 set_fs(KERNEL_DS);
44265 /* The __user pointer casts are valid because of the set_fs() */
44266- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
44267+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
44268 set_fs(oldfs);
44269
44270 if (err)
44271diff -urNp linux-2.6.32.48/fs/compat_ioctl.c linux-2.6.32.48/fs/compat_ioctl.c
44272--- linux-2.6.32.48/fs/compat_ioctl.c 2011-11-08 19:02:43.000000000 -0500
44273+++ linux-2.6.32.48/fs/compat_ioctl.c 2011-11-15 19:59:43.000000000 -0500
44274@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
44275 up = (struct compat_video_spu_palette __user *) arg;
44276 err = get_user(palp, &up->palette);
44277 err |= get_user(length, &up->length);
44278+ if (err)
44279+ return -EFAULT;
44280
44281 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44282 err = put_user(compat_ptr(palp), &up_native->palette);
44283@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned
44284 return -EFAULT;
44285 if (__get_user(udata, &ss32->iomem_base))
44286 return -EFAULT;
44287- ss.iomem_base = compat_ptr(udata);
44288+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44289 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44290 __get_user(ss.port_high, &ss32->port_high))
44291 return -EFAULT;
44292@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(stru
44293 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44294 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44295 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44296- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44297+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44298 return -EFAULT;
44299
44300 return ioctl_preallocate(file, p);
44301diff -urNp linux-2.6.32.48/fs/configfs/dir.c linux-2.6.32.48/fs/configfs/dir.c
44302--- linux-2.6.32.48/fs/configfs/dir.c 2011-11-08 19:02:43.000000000 -0500
44303+++ linux-2.6.32.48/fs/configfs/dir.c 2011-11-15 19:59:43.000000000 -0500
44304@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
44305 }
44306 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44307 struct configfs_dirent *next;
44308- const char * name;
44309+ const unsigned char * name;
44310+ char d_name[sizeof(next->s_dentry->d_iname)];
44311 int len;
44312
44313 next = list_entry(p, struct configfs_dirent,
44314@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
44315 continue;
44316
44317 name = configfs_get_name(next);
44318- len = strlen(name);
44319+ if (next->s_dentry && name == next->s_dentry->d_iname) {
44320+ len = next->s_dentry->d_name.len;
44321+ memcpy(d_name, name, len);
44322+ name = d_name;
44323+ } else
44324+ len = strlen(name);
44325 if (next->s_dentry)
44326 ino = next->s_dentry->d_inode->i_ino;
44327 else
44328diff -urNp linux-2.6.32.48/fs/dcache.c linux-2.6.32.48/fs/dcache.c
44329--- linux-2.6.32.48/fs/dcache.c 2011-11-08 19:02:43.000000000 -0500
44330+++ linux-2.6.32.48/fs/dcache.c 2011-11-15 19:59:43.000000000 -0500
44331@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
44332
44333 static struct kmem_cache *dentry_cache __read_mostly;
44334
44335-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
44336-
44337 /*
44338 * This is the single most critical data structure when it comes
44339 * to the dcache: the hashtable for lookups. Somebody should try
44340@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
44341 mempages -= reserve;
44342
44343 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44344- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44345+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44346
44347 dcache_init();
44348 inode_init();
44349diff -urNp linux-2.6.32.48/fs/dlm/lockspace.c linux-2.6.32.48/fs/dlm/lockspace.c
44350--- linux-2.6.32.48/fs/dlm/lockspace.c 2011-11-08 19:02:43.000000000 -0500
44351+++ linux-2.6.32.48/fs/dlm/lockspace.c 2011-11-15 19:59:43.000000000 -0500
44352@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
44353 kfree(ls);
44354 }
44355
44356-static struct sysfs_ops dlm_attr_ops = {
44357+static const struct sysfs_ops dlm_attr_ops = {
44358 .show = dlm_attr_show,
44359 .store = dlm_attr_store,
44360 };
44361diff -urNp linux-2.6.32.48/fs/ecryptfs/inode.c linux-2.6.32.48/fs/ecryptfs/inode.c
44362--- linux-2.6.32.48/fs/ecryptfs/inode.c 2011-11-08 19:02:43.000000000 -0500
44363+++ linux-2.6.32.48/fs/ecryptfs/inode.c 2011-11-15 19:59:43.000000000 -0500
44364@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
44365 old_fs = get_fs();
44366 set_fs(get_ds());
44367 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44368- (char __user *)lower_buf,
44369+ (char __force_user *)lower_buf,
44370 lower_bufsiz);
44371 set_fs(old_fs);
44372 if (rc < 0)
44373@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
44374 }
44375 old_fs = get_fs();
44376 set_fs(get_ds());
44377- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
44378+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
44379 set_fs(old_fs);
44380 if (rc < 0)
44381 goto out_free;
44382diff -urNp linux-2.6.32.48/fs/exec.c linux-2.6.32.48/fs/exec.c
44383--- linux-2.6.32.48/fs/exec.c 2011-11-08 19:02:43.000000000 -0500
44384+++ linux-2.6.32.48/fs/exec.c 2011-11-15 19:59:43.000000000 -0500
44385@@ -56,12 +56,24 @@
44386 #include <linux/fsnotify.h>
44387 #include <linux/fs_struct.h>
44388 #include <linux/pipe_fs_i.h>
44389+#include <linux/random.h>
44390+#include <linux/seq_file.h>
44391+
44392+#ifdef CONFIG_PAX_REFCOUNT
44393+#include <linux/kallsyms.h>
44394+#include <linux/kdebug.h>
44395+#endif
44396
44397 #include <asm/uaccess.h>
44398 #include <asm/mmu_context.h>
44399 #include <asm/tlb.h>
44400 #include "internal.h"
44401
44402+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44403+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44404+EXPORT_SYMBOL(pax_set_initial_flags_func);
44405+#endif
44406+
44407 int core_uses_pid;
44408 char core_pattern[CORENAME_MAX_SIZE] = "core";
44409 unsigned int core_pipe_limit;
44410@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
44411 goto out;
44412
44413 file = do_filp_open(AT_FDCWD, tmp,
44414- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
44415+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
44416 MAY_READ | MAY_EXEC | MAY_OPEN);
44417 putname(tmp);
44418 error = PTR_ERR(file);
44419@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
44420 int write)
44421 {
44422 struct page *page;
44423- int ret;
44424
44425-#ifdef CONFIG_STACK_GROWSUP
44426- if (write) {
44427- ret = expand_stack_downwards(bprm->vma, pos);
44428- if (ret < 0)
44429- return NULL;
44430- }
44431-#endif
44432- ret = get_user_pages(current, bprm->mm, pos,
44433- 1, write, 1, &page, NULL);
44434- if (ret <= 0)
44435+ if (0 > expand_stack_downwards(bprm->vma, pos))
44436+ return NULL;
44437+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44438 return NULL;
44439
44440 if (write) {
44441@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
44442 vma->vm_end = STACK_TOP_MAX;
44443 vma->vm_start = vma->vm_end - PAGE_SIZE;
44444 vma->vm_flags = VM_STACK_FLAGS;
44445+
44446+#ifdef CONFIG_PAX_SEGMEXEC
44447+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44448+#endif
44449+
44450 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44451
44452 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
44453@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
44454 mm->stack_vm = mm->total_vm = 1;
44455 up_write(&mm->mmap_sem);
44456 bprm->p = vma->vm_end - sizeof(void *);
44457+
44458+#ifdef CONFIG_PAX_RANDUSTACK
44459+ if (randomize_va_space)
44460+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
44461+#endif
44462+
44463 return 0;
44464 err:
44465 up_write(&mm->mmap_sem);
44466@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
44467 int r;
44468 mm_segment_t oldfs = get_fs();
44469 set_fs(KERNEL_DS);
44470- r = copy_strings(argc, (char __user * __user *)argv, bprm);
44471+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
44472 set_fs(oldfs);
44473 return r;
44474 }
44475@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
44476 unsigned long new_end = old_end - shift;
44477 struct mmu_gather *tlb;
44478
44479- BUG_ON(new_start > new_end);
44480+ if (new_start >= new_end || new_start < mmap_min_addr)
44481+ return -ENOMEM;
44482
44483 /*
44484 * ensure there are no vmas between where we want to go
44485@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
44486 if (vma != find_vma(mm, new_start))
44487 return -EFAULT;
44488
44489+#ifdef CONFIG_PAX_SEGMEXEC
44490+ BUG_ON(pax_find_mirror_vma(vma));
44491+#endif
44492+
44493 /*
44494 * cover the whole range: [new_start, old_end)
44495 */
44496@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
44497 stack_top = arch_align_stack(stack_top);
44498 stack_top = PAGE_ALIGN(stack_top);
44499
44500- if (unlikely(stack_top < mmap_min_addr) ||
44501- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44502- return -ENOMEM;
44503-
44504 stack_shift = vma->vm_end - stack_top;
44505
44506 bprm->p -= stack_shift;
44507@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
44508 bprm->exec -= stack_shift;
44509
44510 down_write(&mm->mmap_sem);
44511+
44512+ /* Move stack pages down in memory. */
44513+ if (stack_shift) {
44514+ ret = shift_arg_pages(vma, stack_shift);
44515+ if (ret)
44516+ goto out_unlock;
44517+ }
44518+
44519 vm_flags = VM_STACK_FLAGS;
44520
44521 /*
44522@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
44523 vm_flags &= ~VM_EXEC;
44524 vm_flags |= mm->def_flags;
44525
44526+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44527+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44528+ vm_flags &= ~VM_EXEC;
44529+
44530+#ifdef CONFIG_PAX_MPROTECT
44531+ if (mm->pax_flags & MF_PAX_MPROTECT)
44532+ vm_flags &= ~VM_MAYEXEC;
44533+#endif
44534+
44535+ }
44536+#endif
44537+
44538 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
44539 vm_flags);
44540 if (ret)
44541 goto out_unlock;
44542 BUG_ON(prev != vma);
44543
44544- /* Move stack pages down in memory. */
44545- if (stack_shift) {
44546- ret = shift_arg_pages(vma, stack_shift);
44547- if (ret)
44548- goto out_unlock;
44549- }
44550-
44551 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
44552 stack_size = vma->vm_end - vma->vm_start;
44553 /*
44554@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
44555 int err;
44556
44557 file = do_filp_open(AT_FDCWD, name,
44558- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
44559+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
44560 MAY_EXEC | MAY_OPEN);
44561 if (IS_ERR(file))
44562 goto out;
44563@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
44564 old_fs = get_fs();
44565 set_fs(get_ds());
44566 /* The cast to a user pointer is valid due to the set_fs() */
44567- result = vfs_read(file, (void __user *)addr, count, &pos);
44568+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
44569 set_fs(old_fs);
44570 return result;
44571 }
44572@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
44573 }
44574 rcu_read_unlock();
44575
44576- if (p->fs->users > n_fs) {
44577+ if (atomic_read(&p->fs->users) > n_fs) {
44578 bprm->unsafe |= LSM_UNSAFE_SHARE;
44579 } else {
44580 res = -EAGAIN;
44581@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
44582 char __user *__user *envp,
44583 struct pt_regs * regs)
44584 {
44585+#ifdef CONFIG_GRKERNSEC
44586+ struct file *old_exec_file;
44587+ struct acl_subject_label *old_acl;
44588+ struct rlimit old_rlim[RLIM_NLIMITS];
44589+#endif
44590 struct linux_binprm *bprm;
44591 struct file *file;
44592 struct files_struct *displaced;
44593 bool clear_in_exec;
44594 int retval;
44595+ const struct cred *cred = current_cred();
44596+
44597+ /*
44598+ * We move the actual failure in case of RLIMIT_NPROC excess from
44599+ * set*uid() to execve() because too many poorly written programs
44600+ * don't check setuid() return code. Here we additionally recheck
44601+ * whether NPROC limit is still exceeded.
44602+ */
44603+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44604+
44605+ if ((current->flags & PF_NPROC_EXCEEDED) &&
44606+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
44607+ retval = -EAGAIN;
44608+ goto out_ret;
44609+ }
44610+
44611+ /* We're below the limit (still or again), so we don't want to make
44612+ * further execve() calls fail. */
44613+ current->flags &= ~PF_NPROC_EXCEEDED;
44614
44615 retval = unshare_files(&displaced);
44616 if (retval)
44617@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
44618 bprm->filename = filename;
44619 bprm->interp = filename;
44620
44621+ if (gr_process_user_ban()) {
44622+ retval = -EPERM;
44623+ goto out_file;
44624+ }
44625+
44626+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44627+ retval = -EACCES;
44628+ goto out_file;
44629+ }
44630+
44631 retval = bprm_mm_init(bprm);
44632 if (retval)
44633 goto out_file;
44634@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
44635 if (retval < 0)
44636 goto out;
44637
44638+ if (!gr_tpe_allow(file)) {
44639+ retval = -EACCES;
44640+ goto out;
44641+ }
44642+
44643+ if (gr_check_crash_exec(file)) {
44644+ retval = -EACCES;
44645+ goto out;
44646+ }
44647+
44648+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44649+
44650+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
44651+
44652+#ifdef CONFIG_GRKERNSEC
44653+ old_acl = current->acl;
44654+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44655+ old_exec_file = current->exec_file;
44656+ get_file(file);
44657+ current->exec_file = file;
44658+#endif
44659+
44660+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44661+ bprm->unsafe & LSM_UNSAFE_SHARE);
44662+ if (retval < 0)
44663+ goto out_fail;
44664+
44665 current->flags &= ~PF_KTHREAD;
44666 retval = search_binary_handler(bprm,regs);
44667 if (retval < 0)
44668- goto out;
44669+ goto out_fail;
44670+#ifdef CONFIG_GRKERNSEC
44671+ if (old_exec_file)
44672+ fput(old_exec_file);
44673+#endif
44674
44675 /* execve succeeded */
44676 current->fs->in_exec = 0;
44677@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
44678 put_files_struct(displaced);
44679 return retval;
44680
44681+out_fail:
44682+#ifdef CONFIG_GRKERNSEC
44683+ current->acl = old_acl;
44684+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44685+ fput(current->exec_file);
44686+ current->exec_file = old_exec_file;
44687+#endif
44688+
44689 out:
44690 if (bprm->mm) {
44691 acct_arg_size(bprm, 0);
44692@@ -1591,6 +1693,220 @@ out:
44693 return ispipe;
44694 }
44695
44696+int pax_check_flags(unsigned long *flags)
44697+{
44698+ int retval = 0;
44699+
44700+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44701+ if (*flags & MF_PAX_SEGMEXEC)
44702+ {
44703+ *flags &= ~MF_PAX_SEGMEXEC;
44704+ retval = -EINVAL;
44705+ }
44706+#endif
44707+
44708+ if ((*flags & MF_PAX_PAGEEXEC)
44709+
44710+#ifdef CONFIG_PAX_PAGEEXEC
44711+ && (*flags & MF_PAX_SEGMEXEC)
44712+#endif
44713+
44714+ )
44715+ {
44716+ *flags &= ~MF_PAX_PAGEEXEC;
44717+ retval = -EINVAL;
44718+ }
44719+
44720+ if ((*flags & MF_PAX_MPROTECT)
44721+
44722+#ifdef CONFIG_PAX_MPROTECT
44723+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44724+#endif
44725+
44726+ )
44727+ {
44728+ *flags &= ~MF_PAX_MPROTECT;
44729+ retval = -EINVAL;
44730+ }
44731+
44732+ if ((*flags & MF_PAX_EMUTRAMP)
44733+
44734+#ifdef CONFIG_PAX_EMUTRAMP
44735+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44736+#endif
44737+
44738+ )
44739+ {
44740+ *flags &= ~MF_PAX_EMUTRAMP;
44741+ retval = -EINVAL;
44742+ }
44743+
44744+ return retval;
44745+}
44746+
44747+EXPORT_SYMBOL(pax_check_flags);
44748+
44749+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44750+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44751+{
44752+ struct task_struct *tsk = current;
44753+ struct mm_struct *mm = current->mm;
44754+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44755+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44756+ char *path_exec = NULL;
44757+ char *path_fault = NULL;
44758+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44759+
44760+ if (buffer_exec && buffer_fault) {
44761+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44762+
44763+ down_read(&mm->mmap_sem);
44764+ vma = mm->mmap;
44765+ while (vma && (!vma_exec || !vma_fault)) {
44766+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44767+ vma_exec = vma;
44768+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44769+ vma_fault = vma;
44770+ vma = vma->vm_next;
44771+ }
44772+ if (vma_exec) {
44773+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44774+ if (IS_ERR(path_exec))
44775+ path_exec = "<path too long>";
44776+ else {
44777+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44778+ if (path_exec) {
44779+ *path_exec = 0;
44780+ path_exec = buffer_exec;
44781+ } else
44782+ path_exec = "<path too long>";
44783+ }
44784+ }
44785+ if (vma_fault) {
44786+ start = vma_fault->vm_start;
44787+ end = vma_fault->vm_end;
44788+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44789+ if (vma_fault->vm_file) {
44790+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44791+ if (IS_ERR(path_fault))
44792+ path_fault = "<path too long>";
44793+ else {
44794+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44795+ if (path_fault) {
44796+ *path_fault = 0;
44797+ path_fault = buffer_fault;
44798+ } else
44799+ path_fault = "<path too long>";
44800+ }
44801+ } else
44802+ path_fault = "<anonymous mapping>";
44803+ }
44804+ up_read(&mm->mmap_sem);
44805+ }
44806+ if (tsk->signal->curr_ip)
44807+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44808+ else
44809+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44810+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44811+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44812+ task_uid(tsk), task_euid(tsk), pc, sp);
44813+ free_page((unsigned long)buffer_exec);
44814+ free_page((unsigned long)buffer_fault);
44815+ pax_report_insns(pc, sp);
44816+ do_coredump(SIGKILL, SIGKILL, regs);
44817+}
44818+#endif
44819+
44820+#ifdef CONFIG_PAX_REFCOUNT
44821+void pax_report_refcount_overflow(struct pt_regs *regs)
44822+{
44823+ if (current->signal->curr_ip)
44824+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44825+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44826+ else
44827+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44828+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44829+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
44830+ show_regs(regs);
44831+ force_sig_specific(SIGKILL, current);
44832+}
44833+#endif
44834+
44835+#ifdef CONFIG_PAX_USERCOPY
44836+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44837+int object_is_on_stack(const void *obj, unsigned long len)
44838+{
44839+ const void * const stack = task_stack_page(current);
44840+ const void * const stackend = stack + THREAD_SIZE;
44841+
44842+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44843+ const void *frame = NULL;
44844+ const void *oldframe;
44845+#endif
44846+
44847+ if (obj + len < obj)
44848+ return -1;
44849+
44850+ if (obj + len <= stack || stackend <= obj)
44851+ return 0;
44852+
44853+ if (obj < stack || stackend < obj + len)
44854+ return -1;
44855+
44856+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44857+ oldframe = __builtin_frame_address(1);
44858+ if (oldframe)
44859+ frame = __builtin_frame_address(2);
44860+ /*
44861+ low ----------------------------------------------> high
44862+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44863+ ^----------------^
44864+ allow copies only within here
44865+ */
44866+ while (stack <= frame && frame < stackend) {
44867+ /* if obj + len extends past the last frame, this
44868+ check won't pass and the next frame will be 0,
44869+ causing us to bail out and correctly report
44870+ the copy as invalid
44871+ */
44872+ if (obj + len <= frame)
44873+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44874+ oldframe = frame;
44875+ frame = *(const void * const *)frame;
44876+ }
44877+ return -1;
44878+#else
44879+ return 1;
44880+#endif
44881+}
44882+
44883+
44884+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44885+{
44886+ if (current->signal->curr_ip)
44887+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44888+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44889+ else
44890+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44891+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44892+
44893+ dump_stack();
44894+ gr_handle_kernel_exploit();
44895+ do_group_exit(SIGKILL);
44896+}
44897+#endif
44898+
44899+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44900+void pax_track_stack(void)
44901+{
44902+ unsigned long sp = (unsigned long)&sp;
44903+ if (sp < current_thread_info()->lowest_stack &&
44904+ sp > (unsigned long)task_stack_page(current))
44905+ current_thread_info()->lowest_stack = sp;
44906+}
44907+EXPORT_SYMBOL(pax_track_stack);
44908+#endif
44909+
44910 static int zap_process(struct task_struct *start)
44911 {
44912 struct task_struct *t;
44913@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
44914 pipe = file->f_path.dentry->d_inode->i_pipe;
44915
44916 pipe_lock(pipe);
44917- pipe->readers++;
44918- pipe->writers--;
44919+ atomic_inc(&pipe->readers);
44920+ atomic_dec(&pipe->writers);
44921
44922- while ((pipe->readers > 1) && (!signal_pending(current))) {
44923+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44924 wake_up_interruptible_sync(&pipe->wait);
44925 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44926 pipe_wait(pipe);
44927 }
44928
44929- pipe->readers--;
44930- pipe->writers++;
44931+ atomic_dec(&pipe->readers);
44932+ atomic_inc(&pipe->writers);
44933 pipe_unlock(pipe);
44934
44935 }
44936@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
44937 char **helper_argv = NULL;
44938 int helper_argc = 0;
44939 int dump_count = 0;
44940- static atomic_t core_dump_count = ATOMIC_INIT(0);
44941+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44942
44943 audit_core_dumps(signr);
44944
44945+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44946+ gr_handle_brute_attach(current, mm->flags);
44947+
44948 binfmt = mm->binfmt;
44949 if (!binfmt || !binfmt->core_dump)
44950 goto fail;
44951@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
44952 */
44953 clear_thread_flag(TIF_SIGPENDING);
44954
44955+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44956+
44957 /*
44958 * lock_kernel() because format_corename() is controlled by sysctl, which
44959 * uses lock_kernel()
44960@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
44961 goto fail_unlock;
44962 }
44963
44964- dump_count = atomic_inc_return(&core_dump_count);
44965+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44966 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44967 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44968 task_tgid_vnr(current), current->comm);
44969@@ -1972,7 +2293,7 @@ close_fail:
44970 filp_close(file, NULL);
44971 fail_dropcount:
44972 if (dump_count)
44973- atomic_dec(&core_dump_count);
44974+ atomic_dec_unchecked(&core_dump_count);
44975 fail_unlock:
44976 if (helper_argv)
44977 argv_free(helper_argv);
44978diff -urNp linux-2.6.32.48/fs/ext2/balloc.c linux-2.6.32.48/fs/ext2/balloc.c
44979--- linux-2.6.32.48/fs/ext2/balloc.c 2011-11-08 19:02:43.000000000 -0500
44980+++ linux-2.6.32.48/fs/ext2/balloc.c 2011-11-15 19:59:43.000000000 -0500
44981@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
44982
44983 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44984 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44985- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44986+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44987 sbi->s_resuid != current_fsuid() &&
44988 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44989 return 0;
44990diff -urNp linux-2.6.32.48/fs/ext3/balloc.c linux-2.6.32.48/fs/ext3/balloc.c
44991--- linux-2.6.32.48/fs/ext3/balloc.c 2011-11-08 19:02:43.000000000 -0500
44992+++ linux-2.6.32.48/fs/ext3/balloc.c 2011-11-15 19:59:43.000000000 -0500
44993@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
44994
44995 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44996 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44997- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44998+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44999 sbi->s_resuid != current_fsuid() &&
45000 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45001 return 0;
45002diff -urNp linux-2.6.32.48/fs/ext4/balloc.c linux-2.6.32.48/fs/ext4/balloc.c
45003--- linux-2.6.32.48/fs/ext4/balloc.c 2011-11-08 19:02:43.000000000 -0500
45004+++ linux-2.6.32.48/fs/ext4/balloc.c 2011-11-15 19:59:43.000000000 -0500
45005@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
45006 /* Hm, nope. Are (enough) root reserved blocks available? */
45007 if (sbi->s_resuid == current_fsuid() ||
45008 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
45009- capable(CAP_SYS_RESOURCE)) {
45010+ capable_nolog(CAP_SYS_RESOURCE)) {
45011 if (free_blocks >= (nblocks + dirty_blocks))
45012 return 1;
45013 }
45014diff -urNp linux-2.6.32.48/fs/ext4/ext4.h linux-2.6.32.48/fs/ext4/ext4.h
45015--- linux-2.6.32.48/fs/ext4/ext4.h 2011-11-08 19:02:43.000000000 -0500
45016+++ linux-2.6.32.48/fs/ext4/ext4.h 2011-11-15 19:59:43.000000000 -0500
45017@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
45018
45019 /* stats for buddy allocator */
45020 spinlock_t s_mb_pa_lock;
45021- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45022- atomic_t s_bal_success; /* we found long enough chunks */
45023- atomic_t s_bal_allocated; /* in blocks */
45024- atomic_t s_bal_ex_scanned; /* total extents scanned */
45025- atomic_t s_bal_goals; /* goal hits */
45026- atomic_t s_bal_breaks; /* too long searches */
45027- atomic_t s_bal_2orders; /* 2^order hits */
45028+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45029+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45030+ atomic_unchecked_t s_bal_allocated; /* in blocks */
45031+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45032+ atomic_unchecked_t s_bal_goals; /* goal hits */
45033+ atomic_unchecked_t s_bal_breaks; /* too long searches */
45034+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45035 spinlock_t s_bal_lock;
45036 unsigned long s_mb_buddies_generated;
45037 unsigned long long s_mb_generation_time;
45038- atomic_t s_mb_lost_chunks;
45039- atomic_t s_mb_preallocated;
45040- atomic_t s_mb_discarded;
45041+ atomic_unchecked_t s_mb_lost_chunks;
45042+ atomic_unchecked_t s_mb_preallocated;
45043+ atomic_unchecked_t s_mb_discarded;
45044 atomic_t s_lock_busy;
45045
45046 /* locality groups */
45047diff -urNp linux-2.6.32.48/fs/ext4/file.c linux-2.6.32.48/fs/ext4/file.c
45048--- linux-2.6.32.48/fs/ext4/file.c 2011-11-08 19:02:43.000000000 -0500
45049+++ linux-2.6.32.48/fs/ext4/file.c 2011-11-15 19:59:43.000000000 -0500
45050@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode *
45051 cp = d_path(&path, buf, sizeof(buf));
45052 path_put(&path);
45053 if (!IS_ERR(cp)) {
45054- memcpy(sbi->s_es->s_last_mounted, cp,
45055- sizeof(sbi->s_es->s_last_mounted));
45056+ strlcpy(sbi->s_es->s_last_mounted, cp,
45057+ sizeof(sbi->s_es->s_last_mounted));
45058 sb->s_dirt = 1;
45059 }
45060 }
45061diff -urNp linux-2.6.32.48/fs/ext4/mballoc.c linux-2.6.32.48/fs/ext4/mballoc.c
45062--- linux-2.6.32.48/fs/ext4/mballoc.c 2011-11-08 19:02:43.000000000 -0500
45063+++ linux-2.6.32.48/fs/ext4/mballoc.c 2011-11-15 19:59:43.000000000 -0500
45064@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
45065 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45066
45067 if (EXT4_SB(sb)->s_mb_stats)
45068- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45069+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45070
45071 break;
45072 }
45073@@ -2131,7 +2131,7 @@ repeat:
45074 ac->ac_status = AC_STATUS_CONTINUE;
45075 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45076 cr = 3;
45077- atomic_inc(&sbi->s_mb_lost_chunks);
45078+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45079 goto repeat;
45080 }
45081 }
45082@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
45083 ext4_grpblk_t counters[16];
45084 } sg;
45085
45086+ pax_track_stack();
45087+
45088 group--;
45089 if (group == 0)
45090 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
45091@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
45092 if (sbi->s_mb_stats) {
45093 printk(KERN_INFO
45094 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
45095- atomic_read(&sbi->s_bal_allocated),
45096- atomic_read(&sbi->s_bal_reqs),
45097- atomic_read(&sbi->s_bal_success));
45098+ atomic_read_unchecked(&sbi->s_bal_allocated),
45099+ atomic_read_unchecked(&sbi->s_bal_reqs),
45100+ atomic_read_unchecked(&sbi->s_bal_success));
45101 printk(KERN_INFO
45102 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
45103 "%u 2^N hits, %u breaks, %u lost\n",
45104- atomic_read(&sbi->s_bal_ex_scanned),
45105- atomic_read(&sbi->s_bal_goals),
45106- atomic_read(&sbi->s_bal_2orders),
45107- atomic_read(&sbi->s_bal_breaks),
45108- atomic_read(&sbi->s_mb_lost_chunks));
45109+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45110+ atomic_read_unchecked(&sbi->s_bal_goals),
45111+ atomic_read_unchecked(&sbi->s_bal_2orders),
45112+ atomic_read_unchecked(&sbi->s_bal_breaks),
45113+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45114 printk(KERN_INFO
45115 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
45116 sbi->s_mb_buddies_generated++,
45117 sbi->s_mb_generation_time);
45118 printk(KERN_INFO
45119 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
45120- atomic_read(&sbi->s_mb_preallocated),
45121- atomic_read(&sbi->s_mb_discarded));
45122+ atomic_read_unchecked(&sbi->s_mb_preallocated),
45123+ atomic_read_unchecked(&sbi->s_mb_discarded));
45124 }
45125
45126 free_percpu(sbi->s_locality_groups);
45127@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
45128 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45129
45130 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45131- atomic_inc(&sbi->s_bal_reqs);
45132- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45133+ atomic_inc_unchecked(&sbi->s_bal_reqs);
45134+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45135 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
45136- atomic_inc(&sbi->s_bal_success);
45137- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45138+ atomic_inc_unchecked(&sbi->s_bal_success);
45139+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45140 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45141 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45142- atomic_inc(&sbi->s_bal_goals);
45143+ atomic_inc_unchecked(&sbi->s_bal_goals);
45144 if (ac->ac_found > sbi->s_mb_max_to_scan)
45145- atomic_inc(&sbi->s_bal_breaks);
45146+ atomic_inc_unchecked(&sbi->s_bal_breaks);
45147 }
45148
45149 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45150@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
45151 trace_ext4_mb_new_inode_pa(ac, pa);
45152
45153 ext4_mb_use_inode_pa(ac, pa);
45154- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45155+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45156
45157 ei = EXT4_I(ac->ac_inode);
45158 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45159@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
45160 trace_ext4_mb_new_group_pa(ac, pa);
45161
45162 ext4_mb_use_group_pa(ac, pa);
45163- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45164+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45165
45166 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45167 lg = ac->ac_lg;
45168@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
45169 * from the bitmap and continue.
45170 */
45171 }
45172- atomic_add(free, &sbi->s_mb_discarded);
45173+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
45174
45175 return err;
45176 }
45177@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
45178 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45179 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45180 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45181- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45182+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45183
45184 if (ac) {
45185 ac->ac_sb = sb;
45186diff -urNp linux-2.6.32.48/fs/ext4/super.c linux-2.6.32.48/fs/ext4/super.c
45187--- linux-2.6.32.48/fs/ext4/super.c 2011-11-08 19:02:43.000000000 -0500
45188+++ linux-2.6.32.48/fs/ext4/super.c 2011-11-15 19:59:43.000000000 -0500
45189@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
45190 }
45191
45192
45193-static struct sysfs_ops ext4_attr_ops = {
45194+static const struct sysfs_ops ext4_attr_ops = {
45195 .show = ext4_attr_show,
45196 .store = ext4_attr_store,
45197 };
45198diff -urNp linux-2.6.32.48/fs/fcntl.c linux-2.6.32.48/fs/fcntl.c
45199--- linux-2.6.32.48/fs/fcntl.c 2011-11-08 19:02:43.000000000 -0500
45200+++ linux-2.6.32.48/fs/fcntl.c 2011-11-15 19:59:43.000000000 -0500
45201@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
45202 if (err)
45203 return err;
45204
45205+ if (gr_handle_chroot_fowner(pid, type))
45206+ return -ENOENT;
45207+ if (gr_check_protected_task_fowner(pid, type))
45208+ return -EACCES;
45209+
45210 f_modown(filp, pid, type, force);
45211 return 0;
45212 }
45213@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
45214
45215 static int f_setown_ex(struct file *filp, unsigned long arg)
45216 {
45217- struct f_owner_ex * __user owner_p = (void * __user)arg;
45218+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45219 struct f_owner_ex owner;
45220 struct pid *pid;
45221 int type;
45222@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp
45223
45224 static int f_getown_ex(struct file *filp, unsigned long arg)
45225 {
45226- struct f_owner_ex * __user owner_p = (void * __user)arg;
45227+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45228 struct f_owner_ex owner;
45229 int ret = 0;
45230
45231@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
45232 switch (cmd) {
45233 case F_DUPFD:
45234 case F_DUPFD_CLOEXEC:
45235+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45236 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45237 break;
45238 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45239diff -urNp linux-2.6.32.48/fs/fifo.c linux-2.6.32.48/fs/fifo.c
45240--- linux-2.6.32.48/fs/fifo.c 2011-11-08 19:02:43.000000000 -0500
45241+++ linux-2.6.32.48/fs/fifo.c 2011-11-15 19:59:43.000000000 -0500
45242@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
45243 */
45244 filp->f_op = &read_pipefifo_fops;
45245 pipe->r_counter++;
45246- if (pipe->readers++ == 0)
45247+ if (atomic_inc_return(&pipe->readers) == 1)
45248 wake_up_partner(inode);
45249
45250- if (!pipe->writers) {
45251+ if (!atomic_read(&pipe->writers)) {
45252 if ((filp->f_flags & O_NONBLOCK)) {
45253 /* suppress POLLHUP until we have
45254 * seen a writer */
45255@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
45256 * errno=ENXIO when there is no process reading the FIFO.
45257 */
45258 ret = -ENXIO;
45259- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45260+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45261 goto err;
45262
45263 filp->f_op = &write_pipefifo_fops;
45264 pipe->w_counter++;
45265- if (!pipe->writers++)
45266+ if (atomic_inc_return(&pipe->writers) == 1)
45267 wake_up_partner(inode);
45268
45269- if (!pipe->readers) {
45270+ if (!atomic_read(&pipe->readers)) {
45271 wait_for_partner(inode, &pipe->r_counter);
45272 if (signal_pending(current))
45273 goto err_wr;
45274@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
45275 */
45276 filp->f_op = &rdwr_pipefifo_fops;
45277
45278- pipe->readers++;
45279- pipe->writers++;
45280+ atomic_inc(&pipe->readers);
45281+ atomic_inc(&pipe->writers);
45282 pipe->r_counter++;
45283 pipe->w_counter++;
45284- if (pipe->readers == 1 || pipe->writers == 1)
45285+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45286 wake_up_partner(inode);
45287 break;
45288
45289@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
45290 return 0;
45291
45292 err_rd:
45293- if (!--pipe->readers)
45294+ if (atomic_dec_and_test(&pipe->readers))
45295 wake_up_interruptible(&pipe->wait);
45296 ret = -ERESTARTSYS;
45297 goto err;
45298
45299 err_wr:
45300- if (!--pipe->writers)
45301+ if (atomic_dec_and_test(&pipe->writers))
45302 wake_up_interruptible(&pipe->wait);
45303 ret = -ERESTARTSYS;
45304 goto err;
45305
45306 err:
45307- if (!pipe->readers && !pipe->writers)
45308+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45309 free_pipe_info(inode);
45310
45311 err_nocleanup:
45312diff -urNp linux-2.6.32.48/fs/file.c linux-2.6.32.48/fs/file.c
45313--- linux-2.6.32.48/fs/file.c 2011-11-08 19:02:43.000000000 -0500
45314+++ linux-2.6.32.48/fs/file.c 2011-11-15 19:59:43.000000000 -0500
45315@@ -14,6 +14,7 @@
45316 #include <linux/slab.h>
45317 #include <linux/vmalloc.h>
45318 #include <linux/file.h>
45319+#include <linux/security.h>
45320 #include <linux/fdtable.h>
45321 #include <linux/bitops.h>
45322 #include <linux/interrupt.h>
45323@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
45324 * N.B. For clone tasks sharing a files structure, this test
45325 * will limit the total number of files that can be opened.
45326 */
45327+
45328+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45329 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45330 return -EMFILE;
45331
45332diff -urNp linux-2.6.32.48/fs/filesystems.c linux-2.6.32.48/fs/filesystems.c
45333--- linux-2.6.32.48/fs/filesystems.c 2011-11-08 19:02:43.000000000 -0500
45334+++ linux-2.6.32.48/fs/filesystems.c 2011-11-15 19:59:43.000000000 -0500
45335@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
45336 int len = dot ? dot - name : strlen(name);
45337
45338 fs = __get_fs_type(name, len);
45339+
45340+#ifdef CONFIG_GRKERNSEC_MODHARDEN
45341+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45342+#else
45343 if (!fs && (request_module("%.*s", len, name) == 0))
45344+#endif
45345 fs = __get_fs_type(name, len);
45346
45347 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45348diff -urNp linux-2.6.32.48/fs/fscache/cookie.c linux-2.6.32.48/fs/fscache/cookie.c
45349--- linux-2.6.32.48/fs/fscache/cookie.c 2011-11-08 19:02:43.000000000 -0500
45350+++ linux-2.6.32.48/fs/fscache/cookie.c 2011-11-15 19:59:43.000000000 -0500
45351@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
45352 parent ? (char *) parent->def->name : "<no-parent>",
45353 def->name, netfs_data);
45354
45355- fscache_stat(&fscache_n_acquires);
45356+ fscache_stat_unchecked(&fscache_n_acquires);
45357
45358 /* if there's no parent cookie, then we don't create one here either */
45359 if (!parent) {
45360- fscache_stat(&fscache_n_acquires_null);
45361+ fscache_stat_unchecked(&fscache_n_acquires_null);
45362 _leave(" [no parent]");
45363 return NULL;
45364 }
45365@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
45366 /* allocate and initialise a cookie */
45367 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45368 if (!cookie) {
45369- fscache_stat(&fscache_n_acquires_oom);
45370+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45371 _leave(" [ENOMEM]");
45372 return NULL;
45373 }
45374@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
45375
45376 switch (cookie->def->type) {
45377 case FSCACHE_COOKIE_TYPE_INDEX:
45378- fscache_stat(&fscache_n_cookie_index);
45379+ fscache_stat_unchecked(&fscache_n_cookie_index);
45380 break;
45381 case FSCACHE_COOKIE_TYPE_DATAFILE:
45382- fscache_stat(&fscache_n_cookie_data);
45383+ fscache_stat_unchecked(&fscache_n_cookie_data);
45384 break;
45385 default:
45386- fscache_stat(&fscache_n_cookie_special);
45387+ fscache_stat_unchecked(&fscache_n_cookie_special);
45388 break;
45389 }
45390
45391@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
45392 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45393 atomic_dec(&parent->n_children);
45394 __fscache_cookie_put(cookie);
45395- fscache_stat(&fscache_n_acquires_nobufs);
45396+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45397 _leave(" = NULL");
45398 return NULL;
45399 }
45400 }
45401
45402- fscache_stat(&fscache_n_acquires_ok);
45403+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45404 _leave(" = %p", cookie);
45405 return cookie;
45406 }
45407@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
45408 cache = fscache_select_cache_for_object(cookie->parent);
45409 if (!cache) {
45410 up_read(&fscache_addremove_sem);
45411- fscache_stat(&fscache_n_acquires_no_cache);
45412+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45413 _leave(" = -ENOMEDIUM [no cache]");
45414 return -ENOMEDIUM;
45415 }
45416@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
45417 object = cache->ops->alloc_object(cache, cookie);
45418 fscache_stat_d(&fscache_n_cop_alloc_object);
45419 if (IS_ERR(object)) {
45420- fscache_stat(&fscache_n_object_no_alloc);
45421+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45422 ret = PTR_ERR(object);
45423 goto error;
45424 }
45425
45426- fscache_stat(&fscache_n_object_alloc);
45427+ fscache_stat_unchecked(&fscache_n_object_alloc);
45428
45429 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45430
45431@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
45432 struct fscache_object *object;
45433 struct hlist_node *_p;
45434
45435- fscache_stat(&fscache_n_updates);
45436+ fscache_stat_unchecked(&fscache_n_updates);
45437
45438 if (!cookie) {
45439- fscache_stat(&fscache_n_updates_null);
45440+ fscache_stat_unchecked(&fscache_n_updates_null);
45441 _leave(" [no cookie]");
45442 return;
45443 }
45444@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
45445 struct fscache_object *object;
45446 unsigned long event;
45447
45448- fscache_stat(&fscache_n_relinquishes);
45449+ fscache_stat_unchecked(&fscache_n_relinquishes);
45450 if (retire)
45451- fscache_stat(&fscache_n_relinquishes_retire);
45452+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45453
45454 if (!cookie) {
45455- fscache_stat(&fscache_n_relinquishes_null);
45456+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45457 _leave(" [no cookie]");
45458 return;
45459 }
45460@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
45461
45462 /* wait for the cookie to finish being instantiated (or to fail) */
45463 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45464- fscache_stat(&fscache_n_relinquishes_waitcrt);
45465+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45466 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45467 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45468 }
45469diff -urNp linux-2.6.32.48/fs/fscache/internal.h linux-2.6.32.48/fs/fscache/internal.h
45470--- linux-2.6.32.48/fs/fscache/internal.h 2011-11-08 19:02:43.000000000 -0500
45471+++ linux-2.6.32.48/fs/fscache/internal.h 2011-11-15 19:59:43.000000000 -0500
45472@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
45473 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45474 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45475
45476-extern atomic_t fscache_n_op_pend;
45477-extern atomic_t fscache_n_op_run;
45478-extern atomic_t fscache_n_op_enqueue;
45479-extern atomic_t fscache_n_op_deferred_release;
45480-extern atomic_t fscache_n_op_release;
45481-extern atomic_t fscache_n_op_gc;
45482-extern atomic_t fscache_n_op_cancelled;
45483-extern atomic_t fscache_n_op_rejected;
45484-
45485-extern atomic_t fscache_n_attr_changed;
45486-extern atomic_t fscache_n_attr_changed_ok;
45487-extern atomic_t fscache_n_attr_changed_nobufs;
45488-extern atomic_t fscache_n_attr_changed_nomem;
45489-extern atomic_t fscache_n_attr_changed_calls;
45490-
45491-extern atomic_t fscache_n_allocs;
45492-extern atomic_t fscache_n_allocs_ok;
45493-extern atomic_t fscache_n_allocs_wait;
45494-extern atomic_t fscache_n_allocs_nobufs;
45495-extern atomic_t fscache_n_allocs_intr;
45496-extern atomic_t fscache_n_allocs_object_dead;
45497-extern atomic_t fscache_n_alloc_ops;
45498-extern atomic_t fscache_n_alloc_op_waits;
45499-
45500-extern atomic_t fscache_n_retrievals;
45501-extern atomic_t fscache_n_retrievals_ok;
45502-extern atomic_t fscache_n_retrievals_wait;
45503-extern atomic_t fscache_n_retrievals_nodata;
45504-extern atomic_t fscache_n_retrievals_nobufs;
45505-extern atomic_t fscache_n_retrievals_intr;
45506-extern atomic_t fscache_n_retrievals_nomem;
45507-extern atomic_t fscache_n_retrievals_object_dead;
45508-extern atomic_t fscache_n_retrieval_ops;
45509-extern atomic_t fscache_n_retrieval_op_waits;
45510-
45511-extern atomic_t fscache_n_stores;
45512-extern atomic_t fscache_n_stores_ok;
45513-extern atomic_t fscache_n_stores_again;
45514-extern atomic_t fscache_n_stores_nobufs;
45515-extern atomic_t fscache_n_stores_oom;
45516-extern atomic_t fscache_n_store_ops;
45517-extern atomic_t fscache_n_store_calls;
45518-extern atomic_t fscache_n_store_pages;
45519-extern atomic_t fscache_n_store_radix_deletes;
45520-extern atomic_t fscache_n_store_pages_over_limit;
45521-
45522-extern atomic_t fscache_n_store_vmscan_not_storing;
45523-extern atomic_t fscache_n_store_vmscan_gone;
45524-extern atomic_t fscache_n_store_vmscan_busy;
45525-extern atomic_t fscache_n_store_vmscan_cancelled;
45526-
45527-extern atomic_t fscache_n_marks;
45528-extern atomic_t fscache_n_uncaches;
45529-
45530-extern atomic_t fscache_n_acquires;
45531-extern atomic_t fscache_n_acquires_null;
45532-extern atomic_t fscache_n_acquires_no_cache;
45533-extern atomic_t fscache_n_acquires_ok;
45534-extern atomic_t fscache_n_acquires_nobufs;
45535-extern atomic_t fscache_n_acquires_oom;
45536-
45537-extern atomic_t fscache_n_updates;
45538-extern atomic_t fscache_n_updates_null;
45539-extern atomic_t fscache_n_updates_run;
45540-
45541-extern atomic_t fscache_n_relinquishes;
45542-extern atomic_t fscache_n_relinquishes_null;
45543-extern atomic_t fscache_n_relinquishes_waitcrt;
45544-extern atomic_t fscache_n_relinquishes_retire;
45545-
45546-extern atomic_t fscache_n_cookie_index;
45547-extern atomic_t fscache_n_cookie_data;
45548-extern atomic_t fscache_n_cookie_special;
45549-
45550-extern atomic_t fscache_n_object_alloc;
45551-extern atomic_t fscache_n_object_no_alloc;
45552-extern atomic_t fscache_n_object_lookups;
45553-extern atomic_t fscache_n_object_lookups_negative;
45554-extern atomic_t fscache_n_object_lookups_positive;
45555-extern atomic_t fscache_n_object_lookups_timed_out;
45556-extern atomic_t fscache_n_object_created;
45557-extern atomic_t fscache_n_object_avail;
45558-extern atomic_t fscache_n_object_dead;
45559-
45560-extern atomic_t fscache_n_checkaux_none;
45561-extern atomic_t fscache_n_checkaux_okay;
45562-extern atomic_t fscache_n_checkaux_update;
45563-extern atomic_t fscache_n_checkaux_obsolete;
45564+extern atomic_unchecked_t fscache_n_op_pend;
45565+extern atomic_unchecked_t fscache_n_op_run;
45566+extern atomic_unchecked_t fscache_n_op_enqueue;
45567+extern atomic_unchecked_t fscache_n_op_deferred_release;
45568+extern atomic_unchecked_t fscache_n_op_release;
45569+extern atomic_unchecked_t fscache_n_op_gc;
45570+extern atomic_unchecked_t fscache_n_op_cancelled;
45571+extern atomic_unchecked_t fscache_n_op_rejected;
45572+
45573+extern atomic_unchecked_t fscache_n_attr_changed;
45574+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45575+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45576+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45577+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45578+
45579+extern atomic_unchecked_t fscache_n_allocs;
45580+extern atomic_unchecked_t fscache_n_allocs_ok;
45581+extern atomic_unchecked_t fscache_n_allocs_wait;
45582+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45583+extern atomic_unchecked_t fscache_n_allocs_intr;
45584+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45585+extern atomic_unchecked_t fscache_n_alloc_ops;
45586+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45587+
45588+extern atomic_unchecked_t fscache_n_retrievals;
45589+extern atomic_unchecked_t fscache_n_retrievals_ok;
45590+extern atomic_unchecked_t fscache_n_retrievals_wait;
45591+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45592+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45593+extern atomic_unchecked_t fscache_n_retrievals_intr;
45594+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45595+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45596+extern atomic_unchecked_t fscache_n_retrieval_ops;
45597+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45598+
45599+extern atomic_unchecked_t fscache_n_stores;
45600+extern atomic_unchecked_t fscache_n_stores_ok;
45601+extern atomic_unchecked_t fscache_n_stores_again;
45602+extern atomic_unchecked_t fscache_n_stores_nobufs;
45603+extern atomic_unchecked_t fscache_n_stores_oom;
45604+extern atomic_unchecked_t fscache_n_store_ops;
45605+extern atomic_unchecked_t fscache_n_store_calls;
45606+extern atomic_unchecked_t fscache_n_store_pages;
45607+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45608+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45609+
45610+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45611+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45612+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45613+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45614+
45615+extern atomic_unchecked_t fscache_n_marks;
45616+extern atomic_unchecked_t fscache_n_uncaches;
45617+
45618+extern atomic_unchecked_t fscache_n_acquires;
45619+extern atomic_unchecked_t fscache_n_acquires_null;
45620+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45621+extern atomic_unchecked_t fscache_n_acquires_ok;
45622+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45623+extern atomic_unchecked_t fscache_n_acquires_oom;
45624+
45625+extern atomic_unchecked_t fscache_n_updates;
45626+extern atomic_unchecked_t fscache_n_updates_null;
45627+extern atomic_unchecked_t fscache_n_updates_run;
45628+
45629+extern atomic_unchecked_t fscache_n_relinquishes;
45630+extern atomic_unchecked_t fscache_n_relinquishes_null;
45631+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45632+extern atomic_unchecked_t fscache_n_relinquishes_retire;
45633+
45634+extern atomic_unchecked_t fscache_n_cookie_index;
45635+extern atomic_unchecked_t fscache_n_cookie_data;
45636+extern atomic_unchecked_t fscache_n_cookie_special;
45637+
45638+extern atomic_unchecked_t fscache_n_object_alloc;
45639+extern atomic_unchecked_t fscache_n_object_no_alloc;
45640+extern atomic_unchecked_t fscache_n_object_lookups;
45641+extern atomic_unchecked_t fscache_n_object_lookups_negative;
45642+extern atomic_unchecked_t fscache_n_object_lookups_positive;
45643+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45644+extern atomic_unchecked_t fscache_n_object_created;
45645+extern atomic_unchecked_t fscache_n_object_avail;
45646+extern atomic_unchecked_t fscache_n_object_dead;
45647+
45648+extern atomic_unchecked_t fscache_n_checkaux_none;
45649+extern atomic_unchecked_t fscache_n_checkaux_okay;
45650+extern atomic_unchecked_t fscache_n_checkaux_update;
45651+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45652
45653 extern atomic_t fscache_n_cop_alloc_object;
45654 extern atomic_t fscache_n_cop_lookup_object;
45655@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
45656 atomic_inc(stat);
45657 }
45658
45659+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45660+{
45661+ atomic_inc_unchecked(stat);
45662+}
45663+
45664 static inline void fscache_stat_d(atomic_t *stat)
45665 {
45666 atomic_dec(stat);
45667@@ -259,6 +264,7 @@ extern const struct file_operations fsca
45668
45669 #define __fscache_stat(stat) (NULL)
45670 #define fscache_stat(stat) do {} while (0)
45671+#define fscache_stat_unchecked(stat) do {} while (0)
45672 #define fscache_stat_d(stat) do {} while (0)
45673 #endif
45674
45675diff -urNp linux-2.6.32.48/fs/fscache/object.c linux-2.6.32.48/fs/fscache/object.c
45676--- linux-2.6.32.48/fs/fscache/object.c 2011-11-08 19:02:43.000000000 -0500
45677+++ linux-2.6.32.48/fs/fscache/object.c 2011-11-15 19:59:43.000000000 -0500
45678@@ -144,7 +144,7 @@ static void fscache_object_state_machine
45679 /* update the object metadata on disk */
45680 case FSCACHE_OBJECT_UPDATING:
45681 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45682- fscache_stat(&fscache_n_updates_run);
45683+ fscache_stat_unchecked(&fscache_n_updates_run);
45684 fscache_stat(&fscache_n_cop_update_object);
45685 object->cache->ops->update_object(object);
45686 fscache_stat_d(&fscache_n_cop_update_object);
45687@@ -233,7 +233,7 @@ static void fscache_object_state_machine
45688 spin_lock(&object->lock);
45689 object->state = FSCACHE_OBJECT_DEAD;
45690 spin_unlock(&object->lock);
45691- fscache_stat(&fscache_n_object_dead);
45692+ fscache_stat_unchecked(&fscache_n_object_dead);
45693 goto terminal_transit;
45694
45695 /* handle the parent cache of this object being withdrawn from
45696@@ -248,7 +248,7 @@ static void fscache_object_state_machine
45697 spin_lock(&object->lock);
45698 object->state = FSCACHE_OBJECT_DEAD;
45699 spin_unlock(&object->lock);
45700- fscache_stat(&fscache_n_object_dead);
45701+ fscache_stat_unchecked(&fscache_n_object_dead);
45702 goto terminal_transit;
45703
45704 /* complain about the object being woken up once it is
45705@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
45706 parent->cookie->def->name, cookie->def->name,
45707 object->cache->tag->name);
45708
45709- fscache_stat(&fscache_n_object_lookups);
45710+ fscache_stat_unchecked(&fscache_n_object_lookups);
45711 fscache_stat(&fscache_n_cop_lookup_object);
45712 ret = object->cache->ops->lookup_object(object);
45713 fscache_stat_d(&fscache_n_cop_lookup_object);
45714@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
45715 if (ret == -ETIMEDOUT) {
45716 /* probably stuck behind another object, so move this one to
45717 * the back of the queue */
45718- fscache_stat(&fscache_n_object_lookups_timed_out);
45719+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45720 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45721 }
45722
45723@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
45724
45725 spin_lock(&object->lock);
45726 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45727- fscache_stat(&fscache_n_object_lookups_negative);
45728+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45729
45730 /* transit here to allow write requests to begin stacking up
45731 * and read requests to begin returning ENODATA */
45732@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
45733 * result, in which case there may be data available */
45734 spin_lock(&object->lock);
45735 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45736- fscache_stat(&fscache_n_object_lookups_positive);
45737+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45738
45739 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45740
45741@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
45742 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45743 } else {
45744 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45745- fscache_stat(&fscache_n_object_created);
45746+ fscache_stat_unchecked(&fscache_n_object_created);
45747
45748 object->state = FSCACHE_OBJECT_AVAILABLE;
45749 spin_unlock(&object->lock);
45750@@ -633,7 +633,7 @@ static void fscache_object_available(str
45751 fscache_enqueue_dependents(object);
45752
45753 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45754- fscache_stat(&fscache_n_object_avail);
45755+ fscache_stat_unchecked(&fscache_n_object_avail);
45756
45757 _leave("");
45758 }
45759@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
45760 enum fscache_checkaux result;
45761
45762 if (!object->cookie->def->check_aux) {
45763- fscache_stat(&fscache_n_checkaux_none);
45764+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45765 return FSCACHE_CHECKAUX_OKAY;
45766 }
45767
45768@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
45769 switch (result) {
45770 /* entry okay as is */
45771 case FSCACHE_CHECKAUX_OKAY:
45772- fscache_stat(&fscache_n_checkaux_okay);
45773+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45774 break;
45775
45776 /* entry requires update */
45777 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45778- fscache_stat(&fscache_n_checkaux_update);
45779+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45780 break;
45781
45782 /* entry requires deletion */
45783 case FSCACHE_CHECKAUX_OBSOLETE:
45784- fscache_stat(&fscache_n_checkaux_obsolete);
45785+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45786 break;
45787
45788 default:
45789diff -urNp linux-2.6.32.48/fs/fscache/operation.c linux-2.6.32.48/fs/fscache/operation.c
45790--- linux-2.6.32.48/fs/fscache/operation.c 2011-11-08 19:02:43.000000000 -0500
45791+++ linux-2.6.32.48/fs/fscache/operation.c 2011-11-15 19:59:43.000000000 -0500
45792@@ -16,7 +16,7 @@
45793 #include <linux/seq_file.h>
45794 #include "internal.h"
45795
45796-atomic_t fscache_op_debug_id;
45797+atomic_unchecked_t fscache_op_debug_id;
45798 EXPORT_SYMBOL(fscache_op_debug_id);
45799
45800 /**
45801@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
45802 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45803 ASSERTCMP(atomic_read(&op->usage), >, 0);
45804
45805- fscache_stat(&fscache_n_op_enqueue);
45806+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45807 switch (op->flags & FSCACHE_OP_TYPE) {
45808 case FSCACHE_OP_FAST:
45809 _debug("queue fast");
45810@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
45811 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45812 if (op->processor)
45813 fscache_enqueue_operation(op);
45814- fscache_stat(&fscache_n_op_run);
45815+ fscache_stat_unchecked(&fscache_n_op_run);
45816 }
45817
45818 /*
45819@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
45820 if (object->n_ops > 0) {
45821 atomic_inc(&op->usage);
45822 list_add_tail(&op->pend_link, &object->pending_ops);
45823- fscache_stat(&fscache_n_op_pend);
45824+ fscache_stat_unchecked(&fscache_n_op_pend);
45825 } else if (!list_empty(&object->pending_ops)) {
45826 atomic_inc(&op->usage);
45827 list_add_tail(&op->pend_link, &object->pending_ops);
45828- fscache_stat(&fscache_n_op_pend);
45829+ fscache_stat_unchecked(&fscache_n_op_pend);
45830 fscache_start_operations(object);
45831 } else {
45832 ASSERTCMP(object->n_in_progress, ==, 0);
45833@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
45834 object->n_exclusive++; /* reads and writes must wait */
45835 atomic_inc(&op->usage);
45836 list_add_tail(&op->pend_link, &object->pending_ops);
45837- fscache_stat(&fscache_n_op_pend);
45838+ fscache_stat_unchecked(&fscache_n_op_pend);
45839 ret = 0;
45840 } else {
45841 /* not allowed to submit ops in any other state */
45842@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
45843 if (object->n_exclusive > 0) {
45844 atomic_inc(&op->usage);
45845 list_add_tail(&op->pend_link, &object->pending_ops);
45846- fscache_stat(&fscache_n_op_pend);
45847+ fscache_stat_unchecked(&fscache_n_op_pend);
45848 } else if (!list_empty(&object->pending_ops)) {
45849 atomic_inc(&op->usage);
45850 list_add_tail(&op->pend_link, &object->pending_ops);
45851- fscache_stat(&fscache_n_op_pend);
45852+ fscache_stat_unchecked(&fscache_n_op_pend);
45853 fscache_start_operations(object);
45854 } else {
45855 ASSERTCMP(object->n_exclusive, ==, 0);
45856@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
45857 object->n_ops++;
45858 atomic_inc(&op->usage);
45859 list_add_tail(&op->pend_link, &object->pending_ops);
45860- fscache_stat(&fscache_n_op_pend);
45861+ fscache_stat_unchecked(&fscache_n_op_pend);
45862 ret = 0;
45863 } else if (object->state == FSCACHE_OBJECT_DYING ||
45864 object->state == FSCACHE_OBJECT_LC_DYING ||
45865 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45866- fscache_stat(&fscache_n_op_rejected);
45867+ fscache_stat_unchecked(&fscache_n_op_rejected);
45868 ret = -ENOBUFS;
45869 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45870 fscache_report_unexpected_submission(object, op, ostate);
45871@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
45872
45873 ret = -EBUSY;
45874 if (!list_empty(&op->pend_link)) {
45875- fscache_stat(&fscache_n_op_cancelled);
45876+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45877 list_del_init(&op->pend_link);
45878 object->n_ops--;
45879 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45880@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
45881 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45882 BUG();
45883
45884- fscache_stat(&fscache_n_op_release);
45885+ fscache_stat_unchecked(&fscache_n_op_release);
45886
45887 if (op->release) {
45888 op->release(op);
45889@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
45890 * lock, and defer it otherwise */
45891 if (!spin_trylock(&object->lock)) {
45892 _debug("defer put");
45893- fscache_stat(&fscache_n_op_deferred_release);
45894+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45895
45896 cache = object->cache;
45897 spin_lock(&cache->op_gc_list_lock);
45898@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
45899
45900 _debug("GC DEFERRED REL OBJ%x OP%x",
45901 object->debug_id, op->debug_id);
45902- fscache_stat(&fscache_n_op_gc);
45903+ fscache_stat_unchecked(&fscache_n_op_gc);
45904
45905 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45906
45907diff -urNp linux-2.6.32.48/fs/fscache/page.c linux-2.6.32.48/fs/fscache/page.c
45908--- linux-2.6.32.48/fs/fscache/page.c 2011-11-08 19:02:43.000000000 -0500
45909+++ linux-2.6.32.48/fs/fscache/page.c 2011-11-15 19:59:43.000000000 -0500
45910@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
45911 val = radix_tree_lookup(&cookie->stores, page->index);
45912 if (!val) {
45913 rcu_read_unlock();
45914- fscache_stat(&fscache_n_store_vmscan_not_storing);
45915+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45916 __fscache_uncache_page(cookie, page);
45917 return true;
45918 }
45919@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
45920 spin_unlock(&cookie->stores_lock);
45921
45922 if (xpage) {
45923- fscache_stat(&fscache_n_store_vmscan_cancelled);
45924- fscache_stat(&fscache_n_store_radix_deletes);
45925+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45926+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45927 ASSERTCMP(xpage, ==, page);
45928 } else {
45929- fscache_stat(&fscache_n_store_vmscan_gone);
45930+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45931 }
45932
45933 wake_up_bit(&cookie->flags, 0);
45934@@ -106,7 +106,7 @@ page_busy:
45935 /* we might want to wait here, but that could deadlock the allocator as
45936 * the slow-work threads writing to the cache may all end up sleeping
45937 * on memory allocation */
45938- fscache_stat(&fscache_n_store_vmscan_busy);
45939+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45940 return false;
45941 }
45942 EXPORT_SYMBOL(__fscache_maybe_release_page);
45943@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
45944 FSCACHE_COOKIE_STORING_TAG);
45945 if (!radix_tree_tag_get(&cookie->stores, page->index,
45946 FSCACHE_COOKIE_PENDING_TAG)) {
45947- fscache_stat(&fscache_n_store_radix_deletes);
45948+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45949 xpage = radix_tree_delete(&cookie->stores, page->index);
45950 }
45951 spin_unlock(&cookie->stores_lock);
45952@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
45953
45954 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45955
45956- fscache_stat(&fscache_n_attr_changed_calls);
45957+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45958
45959 if (fscache_object_is_active(object)) {
45960 fscache_set_op_state(op, "CallFS");
45961@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
45962
45963 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45964
45965- fscache_stat(&fscache_n_attr_changed);
45966+ fscache_stat_unchecked(&fscache_n_attr_changed);
45967
45968 op = kzalloc(sizeof(*op), GFP_KERNEL);
45969 if (!op) {
45970- fscache_stat(&fscache_n_attr_changed_nomem);
45971+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45972 _leave(" = -ENOMEM");
45973 return -ENOMEM;
45974 }
45975@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
45976 if (fscache_submit_exclusive_op(object, op) < 0)
45977 goto nobufs;
45978 spin_unlock(&cookie->lock);
45979- fscache_stat(&fscache_n_attr_changed_ok);
45980+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45981 fscache_put_operation(op);
45982 _leave(" = 0");
45983 return 0;
45984@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
45985 nobufs:
45986 spin_unlock(&cookie->lock);
45987 kfree(op);
45988- fscache_stat(&fscache_n_attr_changed_nobufs);
45989+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45990 _leave(" = %d", -ENOBUFS);
45991 return -ENOBUFS;
45992 }
45993@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
45994 /* allocate a retrieval operation and attempt to submit it */
45995 op = kzalloc(sizeof(*op), GFP_NOIO);
45996 if (!op) {
45997- fscache_stat(&fscache_n_retrievals_nomem);
45998+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45999 return NULL;
46000 }
46001
46002@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
46003 return 0;
46004 }
46005
46006- fscache_stat(&fscache_n_retrievals_wait);
46007+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
46008
46009 jif = jiffies;
46010 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46011 fscache_wait_bit_interruptible,
46012 TASK_INTERRUPTIBLE) != 0) {
46013- fscache_stat(&fscache_n_retrievals_intr);
46014+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46015 _leave(" = -ERESTARTSYS");
46016 return -ERESTARTSYS;
46017 }
46018@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
46019 */
46020 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46021 struct fscache_retrieval *op,
46022- atomic_t *stat_op_waits,
46023- atomic_t *stat_object_dead)
46024+ atomic_unchecked_t *stat_op_waits,
46025+ atomic_unchecked_t *stat_object_dead)
46026 {
46027 int ret;
46028
46029@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
46030 goto check_if_dead;
46031
46032 _debug(">>> WT");
46033- fscache_stat(stat_op_waits);
46034+ fscache_stat_unchecked(stat_op_waits);
46035 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46036 fscache_wait_bit_interruptible,
46037 TASK_INTERRUPTIBLE) < 0) {
46038@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
46039
46040 check_if_dead:
46041 if (unlikely(fscache_object_is_dead(object))) {
46042- fscache_stat(stat_object_dead);
46043+ fscache_stat_unchecked(stat_object_dead);
46044 return -ENOBUFS;
46045 }
46046 return 0;
46047@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
46048
46049 _enter("%p,%p,,,", cookie, page);
46050
46051- fscache_stat(&fscache_n_retrievals);
46052+ fscache_stat_unchecked(&fscache_n_retrievals);
46053
46054 if (hlist_empty(&cookie->backing_objects))
46055 goto nobufs;
46056@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
46057 goto nobufs_unlock;
46058 spin_unlock(&cookie->lock);
46059
46060- fscache_stat(&fscache_n_retrieval_ops);
46061+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46062
46063 /* pin the netfs read context in case we need to do the actual netfs
46064 * read because we've encountered a cache read failure */
46065@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
46066
46067 error:
46068 if (ret == -ENOMEM)
46069- fscache_stat(&fscache_n_retrievals_nomem);
46070+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46071 else if (ret == -ERESTARTSYS)
46072- fscache_stat(&fscache_n_retrievals_intr);
46073+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46074 else if (ret == -ENODATA)
46075- fscache_stat(&fscache_n_retrievals_nodata);
46076+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46077 else if (ret < 0)
46078- fscache_stat(&fscache_n_retrievals_nobufs);
46079+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46080 else
46081- fscache_stat(&fscache_n_retrievals_ok);
46082+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46083
46084 fscache_put_retrieval(op);
46085 _leave(" = %d", ret);
46086@@ -453,7 +453,7 @@ nobufs_unlock:
46087 spin_unlock(&cookie->lock);
46088 kfree(op);
46089 nobufs:
46090- fscache_stat(&fscache_n_retrievals_nobufs);
46091+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46092 _leave(" = -ENOBUFS");
46093 return -ENOBUFS;
46094 }
46095@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
46096
46097 _enter("%p,,%d,,,", cookie, *nr_pages);
46098
46099- fscache_stat(&fscache_n_retrievals);
46100+ fscache_stat_unchecked(&fscache_n_retrievals);
46101
46102 if (hlist_empty(&cookie->backing_objects))
46103 goto nobufs;
46104@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
46105 goto nobufs_unlock;
46106 spin_unlock(&cookie->lock);
46107
46108- fscache_stat(&fscache_n_retrieval_ops);
46109+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46110
46111 /* pin the netfs read context in case we need to do the actual netfs
46112 * read because we've encountered a cache read failure */
46113@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
46114
46115 error:
46116 if (ret == -ENOMEM)
46117- fscache_stat(&fscache_n_retrievals_nomem);
46118+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46119 else if (ret == -ERESTARTSYS)
46120- fscache_stat(&fscache_n_retrievals_intr);
46121+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46122 else if (ret == -ENODATA)
46123- fscache_stat(&fscache_n_retrievals_nodata);
46124+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46125 else if (ret < 0)
46126- fscache_stat(&fscache_n_retrievals_nobufs);
46127+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46128 else
46129- fscache_stat(&fscache_n_retrievals_ok);
46130+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46131
46132 fscache_put_retrieval(op);
46133 _leave(" = %d", ret);
46134@@ -570,7 +570,7 @@ nobufs_unlock:
46135 spin_unlock(&cookie->lock);
46136 kfree(op);
46137 nobufs:
46138- fscache_stat(&fscache_n_retrievals_nobufs);
46139+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46140 _leave(" = -ENOBUFS");
46141 return -ENOBUFS;
46142 }
46143@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
46144
46145 _enter("%p,%p,,,", cookie, page);
46146
46147- fscache_stat(&fscache_n_allocs);
46148+ fscache_stat_unchecked(&fscache_n_allocs);
46149
46150 if (hlist_empty(&cookie->backing_objects))
46151 goto nobufs;
46152@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
46153 goto nobufs_unlock;
46154 spin_unlock(&cookie->lock);
46155
46156- fscache_stat(&fscache_n_alloc_ops);
46157+ fscache_stat_unchecked(&fscache_n_alloc_ops);
46158
46159 ret = fscache_wait_for_retrieval_activation(
46160 object, op,
46161@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
46162
46163 error:
46164 if (ret == -ERESTARTSYS)
46165- fscache_stat(&fscache_n_allocs_intr);
46166+ fscache_stat_unchecked(&fscache_n_allocs_intr);
46167 else if (ret < 0)
46168- fscache_stat(&fscache_n_allocs_nobufs);
46169+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46170 else
46171- fscache_stat(&fscache_n_allocs_ok);
46172+ fscache_stat_unchecked(&fscache_n_allocs_ok);
46173
46174 fscache_put_retrieval(op);
46175 _leave(" = %d", ret);
46176@@ -651,7 +651,7 @@ nobufs_unlock:
46177 spin_unlock(&cookie->lock);
46178 kfree(op);
46179 nobufs:
46180- fscache_stat(&fscache_n_allocs_nobufs);
46181+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46182 _leave(" = -ENOBUFS");
46183 return -ENOBUFS;
46184 }
46185@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
46186
46187 spin_lock(&cookie->stores_lock);
46188
46189- fscache_stat(&fscache_n_store_calls);
46190+ fscache_stat_unchecked(&fscache_n_store_calls);
46191
46192 /* find a page to store */
46193 page = NULL;
46194@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
46195 page = results[0];
46196 _debug("gang %d [%lx]", n, page->index);
46197 if (page->index > op->store_limit) {
46198- fscache_stat(&fscache_n_store_pages_over_limit);
46199+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46200 goto superseded;
46201 }
46202
46203@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
46204
46205 if (page) {
46206 fscache_set_op_state(&op->op, "Store");
46207- fscache_stat(&fscache_n_store_pages);
46208+ fscache_stat_unchecked(&fscache_n_store_pages);
46209 fscache_stat(&fscache_n_cop_write_page);
46210 ret = object->cache->ops->write_page(op, page);
46211 fscache_stat_d(&fscache_n_cop_write_page);
46212@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
46213 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46214 ASSERT(PageFsCache(page));
46215
46216- fscache_stat(&fscache_n_stores);
46217+ fscache_stat_unchecked(&fscache_n_stores);
46218
46219 op = kzalloc(sizeof(*op), GFP_NOIO);
46220 if (!op)
46221@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
46222 spin_unlock(&cookie->stores_lock);
46223 spin_unlock(&object->lock);
46224
46225- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46226+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46227 op->store_limit = object->store_limit;
46228
46229 if (fscache_submit_op(object, &op->op) < 0)
46230@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
46231
46232 spin_unlock(&cookie->lock);
46233 radix_tree_preload_end();
46234- fscache_stat(&fscache_n_store_ops);
46235- fscache_stat(&fscache_n_stores_ok);
46236+ fscache_stat_unchecked(&fscache_n_store_ops);
46237+ fscache_stat_unchecked(&fscache_n_stores_ok);
46238
46239 /* the slow work queue now carries its own ref on the object */
46240 fscache_put_operation(&op->op);
46241@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
46242 return 0;
46243
46244 already_queued:
46245- fscache_stat(&fscache_n_stores_again);
46246+ fscache_stat_unchecked(&fscache_n_stores_again);
46247 already_pending:
46248 spin_unlock(&cookie->stores_lock);
46249 spin_unlock(&object->lock);
46250 spin_unlock(&cookie->lock);
46251 radix_tree_preload_end();
46252 kfree(op);
46253- fscache_stat(&fscache_n_stores_ok);
46254+ fscache_stat_unchecked(&fscache_n_stores_ok);
46255 _leave(" = 0");
46256 return 0;
46257
46258@@ -886,14 +886,14 @@ nobufs:
46259 spin_unlock(&cookie->lock);
46260 radix_tree_preload_end();
46261 kfree(op);
46262- fscache_stat(&fscache_n_stores_nobufs);
46263+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
46264 _leave(" = -ENOBUFS");
46265 return -ENOBUFS;
46266
46267 nomem_free:
46268 kfree(op);
46269 nomem:
46270- fscache_stat(&fscache_n_stores_oom);
46271+ fscache_stat_unchecked(&fscache_n_stores_oom);
46272 _leave(" = -ENOMEM");
46273 return -ENOMEM;
46274 }
46275@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
46276 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46277 ASSERTCMP(page, !=, NULL);
46278
46279- fscache_stat(&fscache_n_uncaches);
46280+ fscache_stat_unchecked(&fscache_n_uncaches);
46281
46282 /* cache withdrawal may beat us to it */
46283 if (!PageFsCache(page))
46284@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
46285 unsigned long loop;
46286
46287 #ifdef CONFIG_FSCACHE_STATS
46288- atomic_add(pagevec->nr, &fscache_n_marks);
46289+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46290 #endif
46291
46292 for (loop = 0; loop < pagevec->nr; loop++) {
46293diff -urNp linux-2.6.32.48/fs/fscache/stats.c linux-2.6.32.48/fs/fscache/stats.c
46294--- linux-2.6.32.48/fs/fscache/stats.c 2011-11-08 19:02:43.000000000 -0500
46295+++ linux-2.6.32.48/fs/fscache/stats.c 2011-11-15 19:59:43.000000000 -0500
46296@@ -18,95 +18,95 @@
46297 /*
46298 * operation counters
46299 */
46300-atomic_t fscache_n_op_pend;
46301-atomic_t fscache_n_op_run;
46302-atomic_t fscache_n_op_enqueue;
46303-atomic_t fscache_n_op_requeue;
46304-atomic_t fscache_n_op_deferred_release;
46305-atomic_t fscache_n_op_release;
46306-atomic_t fscache_n_op_gc;
46307-atomic_t fscache_n_op_cancelled;
46308-atomic_t fscache_n_op_rejected;
46309-
46310-atomic_t fscache_n_attr_changed;
46311-atomic_t fscache_n_attr_changed_ok;
46312-atomic_t fscache_n_attr_changed_nobufs;
46313-atomic_t fscache_n_attr_changed_nomem;
46314-atomic_t fscache_n_attr_changed_calls;
46315-
46316-atomic_t fscache_n_allocs;
46317-atomic_t fscache_n_allocs_ok;
46318-atomic_t fscache_n_allocs_wait;
46319-atomic_t fscache_n_allocs_nobufs;
46320-atomic_t fscache_n_allocs_intr;
46321-atomic_t fscache_n_allocs_object_dead;
46322-atomic_t fscache_n_alloc_ops;
46323-atomic_t fscache_n_alloc_op_waits;
46324-
46325-atomic_t fscache_n_retrievals;
46326-atomic_t fscache_n_retrievals_ok;
46327-atomic_t fscache_n_retrievals_wait;
46328-atomic_t fscache_n_retrievals_nodata;
46329-atomic_t fscache_n_retrievals_nobufs;
46330-atomic_t fscache_n_retrievals_intr;
46331-atomic_t fscache_n_retrievals_nomem;
46332-atomic_t fscache_n_retrievals_object_dead;
46333-atomic_t fscache_n_retrieval_ops;
46334-atomic_t fscache_n_retrieval_op_waits;
46335-
46336-atomic_t fscache_n_stores;
46337-atomic_t fscache_n_stores_ok;
46338-atomic_t fscache_n_stores_again;
46339-atomic_t fscache_n_stores_nobufs;
46340-atomic_t fscache_n_stores_oom;
46341-atomic_t fscache_n_store_ops;
46342-atomic_t fscache_n_store_calls;
46343-atomic_t fscache_n_store_pages;
46344-atomic_t fscache_n_store_radix_deletes;
46345-atomic_t fscache_n_store_pages_over_limit;
46346-
46347-atomic_t fscache_n_store_vmscan_not_storing;
46348-atomic_t fscache_n_store_vmscan_gone;
46349-atomic_t fscache_n_store_vmscan_busy;
46350-atomic_t fscache_n_store_vmscan_cancelled;
46351-
46352-atomic_t fscache_n_marks;
46353-atomic_t fscache_n_uncaches;
46354-
46355-atomic_t fscache_n_acquires;
46356-atomic_t fscache_n_acquires_null;
46357-atomic_t fscache_n_acquires_no_cache;
46358-atomic_t fscache_n_acquires_ok;
46359-atomic_t fscache_n_acquires_nobufs;
46360-atomic_t fscache_n_acquires_oom;
46361-
46362-atomic_t fscache_n_updates;
46363-atomic_t fscache_n_updates_null;
46364-atomic_t fscache_n_updates_run;
46365-
46366-atomic_t fscache_n_relinquishes;
46367-atomic_t fscache_n_relinquishes_null;
46368-atomic_t fscache_n_relinquishes_waitcrt;
46369-atomic_t fscache_n_relinquishes_retire;
46370-
46371-atomic_t fscache_n_cookie_index;
46372-atomic_t fscache_n_cookie_data;
46373-atomic_t fscache_n_cookie_special;
46374-
46375-atomic_t fscache_n_object_alloc;
46376-atomic_t fscache_n_object_no_alloc;
46377-atomic_t fscache_n_object_lookups;
46378-atomic_t fscache_n_object_lookups_negative;
46379-atomic_t fscache_n_object_lookups_positive;
46380-atomic_t fscache_n_object_lookups_timed_out;
46381-atomic_t fscache_n_object_created;
46382-atomic_t fscache_n_object_avail;
46383-atomic_t fscache_n_object_dead;
46384-
46385-atomic_t fscache_n_checkaux_none;
46386-atomic_t fscache_n_checkaux_okay;
46387-atomic_t fscache_n_checkaux_update;
46388-atomic_t fscache_n_checkaux_obsolete;
46389+atomic_unchecked_t fscache_n_op_pend;
46390+atomic_unchecked_t fscache_n_op_run;
46391+atomic_unchecked_t fscache_n_op_enqueue;
46392+atomic_unchecked_t fscache_n_op_requeue;
46393+atomic_unchecked_t fscache_n_op_deferred_release;
46394+atomic_unchecked_t fscache_n_op_release;
46395+atomic_unchecked_t fscache_n_op_gc;
46396+atomic_unchecked_t fscache_n_op_cancelled;
46397+atomic_unchecked_t fscache_n_op_rejected;
46398+
46399+atomic_unchecked_t fscache_n_attr_changed;
46400+atomic_unchecked_t fscache_n_attr_changed_ok;
46401+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46402+atomic_unchecked_t fscache_n_attr_changed_nomem;
46403+atomic_unchecked_t fscache_n_attr_changed_calls;
46404+
46405+atomic_unchecked_t fscache_n_allocs;
46406+atomic_unchecked_t fscache_n_allocs_ok;
46407+atomic_unchecked_t fscache_n_allocs_wait;
46408+atomic_unchecked_t fscache_n_allocs_nobufs;
46409+atomic_unchecked_t fscache_n_allocs_intr;
46410+atomic_unchecked_t fscache_n_allocs_object_dead;
46411+atomic_unchecked_t fscache_n_alloc_ops;
46412+atomic_unchecked_t fscache_n_alloc_op_waits;
46413+
46414+atomic_unchecked_t fscache_n_retrievals;
46415+atomic_unchecked_t fscache_n_retrievals_ok;
46416+atomic_unchecked_t fscache_n_retrievals_wait;
46417+atomic_unchecked_t fscache_n_retrievals_nodata;
46418+atomic_unchecked_t fscache_n_retrievals_nobufs;
46419+atomic_unchecked_t fscache_n_retrievals_intr;
46420+atomic_unchecked_t fscache_n_retrievals_nomem;
46421+atomic_unchecked_t fscache_n_retrievals_object_dead;
46422+atomic_unchecked_t fscache_n_retrieval_ops;
46423+atomic_unchecked_t fscache_n_retrieval_op_waits;
46424+
46425+atomic_unchecked_t fscache_n_stores;
46426+atomic_unchecked_t fscache_n_stores_ok;
46427+atomic_unchecked_t fscache_n_stores_again;
46428+atomic_unchecked_t fscache_n_stores_nobufs;
46429+atomic_unchecked_t fscache_n_stores_oom;
46430+atomic_unchecked_t fscache_n_store_ops;
46431+atomic_unchecked_t fscache_n_store_calls;
46432+atomic_unchecked_t fscache_n_store_pages;
46433+atomic_unchecked_t fscache_n_store_radix_deletes;
46434+atomic_unchecked_t fscache_n_store_pages_over_limit;
46435+
46436+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46437+atomic_unchecked_t fscache_n_store_vmscan_gone;
46438+atomic_unchecked_t fscache_n_store_vmscan_busy;
46439+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46440+
46441+atomic_unchecked_t fscache_n_marks;
46442+atomic_unchecked_t fscache_n_uncaches;
46443+
46444+atomic_unchecked_t fscache_n_acquires;
46445+atomic_unchecked_t fscache_n_acquires_null;
46446+atomic_unchecked_t fscache_n_acquires_no_cache;
46447+atomic_unchecked_t fscache_n_acquires_ok;
46448+atomic_unchecked_t fscache_n_acquires_nobufs;
46449+atomic_unchecked_t fscache_n_acquires_oom;
46450+
46451+atomic_unchecked_t fscache_n_updates;
46452+atomic_unchecked_t fscache_n_updates_null;
46453+atomic_unchecked_t fscache_n_updates_run;
46454+
46455+atomic_unchecked_t fscache_n_relinquishes;
46456+atomic_unchecked_t fscache_n_relinquishes_null;
46457+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46458+atomic_unchecked_t fscache_n_relinquishes_retire;
46459+
46460+atomic_unchecked_t fscache_n_cookie_index;
46461+atomic_unchecked_t fscache_n_cookie_data;
46462+atomic_unchecked_t fscache_n_cookie_special;
46463+
46464+atomic_unchecked_t fscache_n_object_alloc;
46465+atomic_unchecked_t fscache_n_object_no_alloc;
46466+atomic_unchecked_t fscache_n_object_lookups;
46467+atomic_unchecked_t fscache_n_object_lookups_negative;
46468+atomic_unchecked_t fscache_n_object_lookups_positive;
46469+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46470+atomic_unchecked_t fscache_n_object_created;
46471+atomic_unchecked_t fscache_n_object_avail;
46472+atomic_unchecked_t fscache_n_object_dead;
46473+
46474+atomic_unchecked_t fscache_n_checkaux_none;
46475+atomic_unchecked_t fscache_n_checkaux_okay;
46476+atomic_unchecked_t fscache_n_checkaux_update;
46477+atomic_unchecked_t fscache_n_checkaux_obsolete;
46478
46479 atomic_t fscache_n_cop_alloc_object;
46480 atomic_t fscache_n_cop_lookup_object;
46481@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
46482 seq_puts(m, "FS-Cache statistics\n");
46483
46484 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46485- atomic_read(&fscache_n_cookie_index),
46486- atomic_read(&fscache_n_cookie_data),
46487- atomic_read(&fscache_n_cookie_special));
46488+ atomic_read_unchecked(&fscache_n_cookie_index),
46489+ atomic_read_unchecked(&fscache_n_cookie_data),
46490+ atomic_read_unchecked(&fscache_n_cookie_special));
46491
46492 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46493- atomic_read(&fscache_n_object_alloc),
46494- atomic_read(&fscache_n_object_no_alloc),
46495- atomic_read(&fscache_n_object_avail),
46496- atomic_read(&fscache_n_object_dead));
46497+ atomic_read_unchecked(&fscache_n_object_alloc),
46498+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46499+ atomic_read_unchecked(&fscache_n_object_avail),
46500+ atomic_read_unchecked(&fscache_n_object_dead));
46501 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46502- atomic_read(&fscache_n_checkaux_none),
46503- atomic_read(&fscache_n_checkaux_okay),
46504- atomic_read(&fscache_n_checkaux_update),
46505- atomic_read(&fscache_n_checkaux_obsolete));
46506+ atomic_read_unchecked(&fscache_n_checkaux_none),
46507+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46508+ atomic_read_unchecked(&fscache_n_checkaux_update),
46509+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46510
46511 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46512- atomic_read(&fscache_n_marks),
46513- atomic_read(&fscache_n_uncaches));
46514+ atomic_read_unchecked(&fscache_n_marks),
46515+ atomic_read_unchecked(&fscache_n_uncaches));
46516
46517 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46518 " oom=%u\n",
46519- atomic_read(&fscache_n_acquires),
46520- atomic_read(&fscache_n_acquires_null),
46521- atomic_read(&fscache_n_acquires_no_cache),
46522- atomic_read(&fscache_n_acquires_ok),
46523- atomic_read(&fscache_n_acquires_nobufs),
46524- atomic_read(&fscache_n_acquires_oom));
46525+ atomic_read_unchecked(&fscache_n_acquires),
46526+ atomic_read_unchecked(&fscache_n_acquires_null),
46527+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46528+ atomic_read_unchecked(&fscache_n_acquires_ok),
46529+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46530+ atomic_read_unchecked(&fscache_n_acquires_oom));
46531
46532 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46533- atomic_read(&fscache_n_object_lookups),
46534- atomic_read(&fscache_n_object_lookups_negative),
46535- atomic_read(&fscache_n_object_lookups_positive),
46536- atomic_read(&fscache_n_object_lookups_timed_out),
46537- atomic_read(&fscache_n_object_created));
46538+ atomic_read_unchecked(&fscache_n_object_lookups),
46539+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46540+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46541+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
46542+ atomic_read_unchecked(&fscache_n_object_created));
46543
46544 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46545- atomic_read(&fscache_n_updates),
46546- atomic_read(&fscache_n_updates_null),
46547- atomic_read(&fscache_n_updates_run));
46548+ atomic_read_unchecked(&fscache_n_updates),
46549+ atomic_read_unchecked(&fscache_n_updates_null),
46550+ atomic_read_unchecked(&fscache_n_updates_run));
46551
46552 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46553- atomic_read(&fscache_n_relinquishes),
46554- atomic_read(&fscache_n_relinquishes_null),
46555- atomic_read(&fscache_n_relinquishes_waitcrt),
46556- atomic_read(&fscache_n_relinquishes_retire));
46557+ atomic_read_unchecked(&fscache_n_relinquishes),
46558+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46559+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46560+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46561
46562 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46563- atomic_read(&fscache_n_attr_changed),
46564- atomic_read(&fscache_n_attr_changed_ok),
46565- atomic_read(&fscache_n_attr_changed_nobufs),
46566- atomic_read(&fscache_n_attr_changed_nomem),
46567- atomic_read(&fscache_n_attr_changed_calls));
46568+ atomic_read_unchecked(&fscache_n_attr_changed),
46569+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46570+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46571+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46572+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46573
46574 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46575- atomic_read(&fscache_n_allocs),
46576- atomic_read(&fscache_n_allocs_ok),
46577- atomic_read(&fscache_n_allocs_wait),
46578- atomic_read(&fscache_n_allocs_nobufs),
46579- atomic_read(&fscache_n_allocs_intr));
46580+ atomic_read_unchecked(&fscache_n_allocs),
46581+ atomic_read_unchecked(&fscache_n_allocs_ok),
46582+ atomic_read_unchecked(&fscache_n_allocs_wait),
46583+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46584+ atomic_read_unchecked(&fscache_n_allocs_intr));
46585 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46586- atomic_read(&fscache_n_alloc_ops),
46587- atomic_read(&fscache_n_alloc_op_waits),
46588- atomic_read(&fscache_n_allocs_object_dead));
46589+ atomic_read_unchecked(&fscache_n_alloc_ops),
46590+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46591+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46592
46593 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46594 " int=%u oom=%u\n",
46595- atomic_read(&fscache_n_retrievals),
46596- atomic_read(&fscache_n_retrievals_ok),
46597- atomic_read(&fscache_n_retrievals_wait),
46598- atomic_read(&fscache_n_retrievals_nodata),
46599- atomic_read(&fscache_n_retrievals_nobufs),
46600- atomic_read(&fscache_n_retrievals_intr),
46601- atomic_read(&fscache_n_retrievals_nomem));
46602+ atomic_read_unchecked(&fscache_n_retrievals),
46603+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46604+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46605+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46606+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46607+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46608+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46609 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46610- atomic_read(&fscache_n_retrieval_ops),
46611- atomic_read(&fscache_n_retrieval_op_waits),
46612- atomic_read(&fscache_n_retrievals_object_dead));
46613+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46614+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46615+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46616
46617 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46618- atomic_read(&fscache_n_stores),
46619- atomic_read(&fscache_n_stores_ok),
46620- atomic_read(&fscache_n_stores_again),
46621- atomic_read(&fscache_n_stores_nobufs),
46622- atomic_read(&fscache_n_stores_oom));
46623+ atomic_read_unchecked(&fscache_n_stores),
46624+ atomic_read_unchecked(&fscache_n_stores_ok),
46625+ atomic_read_unchecked(&fscache_n_stores_again),
46626+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46627+ atomic_read_unchecked(&fscache_n_stores_oom));
46628 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46629- atomic_read(&fscache_n_store_ops),
46630- atomic_read(&fscache_n_store_calls),
46631- atomic_read(&fscache_n_store_pages),
46632- atomic_read(&fscache_n_store_radix_deletes),
46633- atomic_read(&fscache_n_store_pages_over_limit));
46634+ atomic_read_unchecked(&fscache_n_store_ops),
46635+ atomic_read_unchecked(&fscache_n_store_calls),
46636+ atomic_read_unchecked(&fscache_n_store_pages),
46637+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
46638+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46639
46640 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46641- atomic_read(&fscache_n_store_vmscan_not_storing),
46642- atomic_read(&fscache_n_store_vmscan_gone),
46643- atomic_read(&fscache_n_store_vmscan_busy),
46644- atomic_read(&fscache_n_store_vmscan_cancelled));
46645+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46646+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46647+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46648+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46649
46650 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46651- atomic_read(&fscache_n_op_pend),
46652- atomic_read(&fscache_n_op_run),
46653- atomic_read(&fscache_n_op_enqueue),
46654- atomic_read(&fscache_n_op_cancelled),
46655- atomic_read(&fscache_n_op_rejected));
46656+ atomic_read_unchecked(&fscache_n_op_pend),
46657+ atomic_read_unchecked(&fscache_n_op_run),
46658+ atomic_read_unchecked(&fscache_n_op_enqueue),
46659+ atomic_read_unchecked(&fscache_n_op_cancelled),
46660+ atomic_read_unchecked(&fscache_n_op_rejected));
46661 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46662- atomic_read(&fscache_n_op_deferred_release),
46663- atomic_read(&fscache_n_op_release),
46664- atomic_read(&fscache_n_op_gc));
46665+ atomic_read_unchecked(&fscache_n_op_deferred_release),
46666+ atomic_read_unchecked(&fscache_n_op_release),
46667+ atomic_read_unchecked(&fscache_n_op_gc));
46668
46669 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46670 atomic_read(&fscache_n_cop_alloc_object),
46671diff -urNp linux-2.6.32.48/fs/fs_struct.c linux-2.6.32.48/fs/fs_struct.c
46672--- linux-2.6.32.48/fs/fs_struct.c 2011-11-08 19:02:43.000000000 -0500
46673+++ linux-2.6.32.48/fs/fs_struct.c 2011-11-15 19:59:43.000000000 -0500
46674@@ -4,6 +4,7 @@
46675 #include <linux/path.h>
46676 #include <linux/slab.h>
46677 #include <linux/fs_struct.h>
46678+#include <linux/grsecurity.h>
46679
46680 /*
46681 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
46682@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
46683 old_root = fs->root;
46684 fs->root = *path;
46685 path_get(path);
46686+ gr_set_chroot_entries(current, path);
46687 write_unlock(&fs->lock);
46688 if (old_root.dentry)
46689 path_put(&old_root);
46690@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
46691 && fs->root.mnt == old_root->mnt) {
46692 path_get(new_root);
46693 fs->root = *new_root;
46694+ gr_set_chroot_entries(p, new_root);
46695 count++;
46696 }
46697 if (fs->pwd.dentry == old_root->dentry
46698@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
46699 task_lock(tsk);
46700 write_lock(&fs->lock);
46701 tsk->fs = NULL;
46702- kill = !--fs->users;
46703+ gr_clear_chroot_entries(tsk);
46704+ kill = !atomic_dec_return(&fs->users);
46705 write_unlock(&fs->lock);
46706 task_unlock(tsk);
46707 if (kill)
46708@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
46709 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
46710 /* We don't need to lock fs - think why ;-) */
46711 if (fs) {
46712- fs->users = 1;
46713+ atomic_set(&fs->users, 1);
46714 fs->in_exec = 0;
46715 rwlock_init(&fs->lock);
46716 fs->umask = old->umask;
46717@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
46718
46719 task_lock(current);
46720 write_lock(&fs->lock);
46721- kill = !--fs->users;
46722+ kill = !atomic_dec_return(&fs->users);
46723 current->fs = new_fs;
46724+ gr_set_chroot_entries(current, &new_fs->root);
46725 write_unlock(&fs->lock);
46726 task_unlock(current);
46727
46728@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
46729
46730 /* to be mentioned only in INIT_TASK */
46731 struct fs_struct init_fs = {
46732- .users = 1,
46733+ .users = ATOMIC_INIT(1),
46734 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
46735 .umask = 0022,
46736 };
46737@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
46738 task_lock(current);
46739
46740 write_lock(&init_fs.lock);
46741- init_fs.users++;
46742+ atomic_inc(&init_fs.users);
46743 write_unlock(&init_fs.lock);
46744
46745 write_lock(&fs->lock);
46746 current->fs = &init_fs;
46747- kill = !--fs->users;
46748+ gr_set_chroot_entries(current, &current->fs->root);
46749+ kill = !atomic_dec_return(&fs->users);
46750 write_unlock(&fs->lock);
46751
46752 task_unlock(current);
46753diff -urNp linux-2.6.32.48/fs/fuse/cuse.c linux-2.6.32.48/fs/fuse/cuse.c
46754--- linux-2.6.32.48/fs/fuse/cuse.c 2011-11-08 19:02:43.000000000 -0500
46755+++ linux-2.6.32.48/fs/fuse/cuse.c 2011-11-15 19:59:43.000000000 -0500
46756@@ -576,10 +576,12 @@ static int __init cuse_init(void)
46757 INIT_LIST_HEAD(&cuse_conntbl[i]);
46758
46759 /* inherit and extend fuse_dev_operations */
46760- cuse_channel_fops = fuse_dev_operations;
46761- cuse_channel_fops.owner = THIS_MODULE;
46762- cuse_channel_fops.open = cuse_channel_open;
46763- cuse_channel_fops.release = cuse_channel_release;
46764+ pax_open_kernel();
46765+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46766+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46767+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46768+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46769+ pax_close_kernel();
46770
46771 cuse_class = class_create(THIS_MODULE, "cuse");
46772 if (IS_ERR(cuse_class))
46773diff -urNp linux-2.6.32.48/fs/fuse/dev.c linux-2.6.32.48/fs/fuse/dev.c
46774--- linux-2.6.32.48/fs/fuse/dev.c 2011-11-08 19:02:43.000000000 -0500
46775+++ linux-2.6.32.48/fs/fuse/dev.c 2011-11-15 19:59:43.000000000 -0500
46776@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
46777 {
46778 struct fuse_notify_inval_entry_out outarg;
46779 int err = -EINVAL;
46780- char buf[FUSE_NAME_MAX+1];
46781+ char *buf = NULL;
46782 struct qstr name;
46783
46784 if (size < sizeof(outarg))
46785@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
46786 if (outarg.namelen > FUSE_NAME_MAX)
46787 goto err;
46788
46789+ err = -ENOMEM;
46790+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
46791+ if (!buf)
46792+ goto err;
46793+
46794 err = -EINVAL;
46795 if (size != sizeof(outarg) + outarg.namelen + 1)
46796 goto err;
46797@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struc
46798
46799 down_read(&fc->killsb);
46800 err = -ENOENT;
46801- if (!fc->sb)
46802- goto err_unlock;
46803-
46804- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
46805-
46806-err_unlock:
46807+ if (fc->sb)
46808+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
46809 up_read(&fc->killsb);
46810+ kfree(buf);
46811 return err;
46812
46813 err:
46814 fuse_copy_finish(cs);
46815+ kfree(buf);
46816 return err;
46817 }
46818
46819diff -urNp linux-2.6.32.48/fs/fuse/dir.c linux-2.6.32.48/fs/fuse/dir.c
46820--- linux-2.6.32.48/fs/fuse/dir.c 2011-11-08 19:02:43.000000000 -0500
46821+++ linux-2.6.32.48/fs/fuse/dir.c 2011-11-15 19:59:43.000000000 -0500
46822@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
46823 return link;
46824 }
46825
46826-static void free_link(char *link)
46827+static void free_link(const char *link)
46828 {
46829 if (!IS_ERR(link))
46830 free_page((unsigned long) link);
46831diff -urNp linux-2.6.32.48/fs/gfs2/ops_inode.c linux-2.6.32.48/fs/gfs2/ops_inode.c
46832--- linux-2.6.32.48/fs/gfs2/ops_inode.c 2011-11-08 19:02:43.000000000 -0500
46833+++ linux-2.6.32.48/fs/gfs2/ops_inode.c 2011-11-15 19:59:43.000000000 -0500
46834@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
46835 unsigned int x;
46836 int error;
46837
46838+ pax_track_stack();
46839+
46840 if (ndentry->d_inode) {
46841 nip = GFS2_I(ndentry->d_inode);
46842 if (ip == nip)
46843diff -urNp linux-2.6.32.48/fs/gfs2/sys.c linux-2.6.32.48/fs/gfs2/sys.c
46844--- linux-2.6.32.48/fs/gfs2/sys.c 2011-11-08 19:02:43.000000000 -0500
46845+++ linux-2.6.32.48/fs/gfs2/sys.c 2011-11-15 19:59:43.000000000 -0500
46846@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
46847 return a->store ? a->store(sdp, buf, len) : len;
46848 }
46849
46850-static struct sysfs_ops gfs2_attr_ops = {
46851+static const struct sysfs_ops gfs2_attr_ops = {
46852 .show = gfs2_attr_show,
46853 .store = gfs2_attr_store,
46854 };
46855@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
46856 return 0;
46857 }
46858
46859-static struct kset_uevent_ops gfs2_uevent_ops = {
46860+static const struct kset_uevent_ops gfs2_uevent_ops = {
46861 .uevent = gfs2_uevent,
46862 };
46863
46864diff -urNp linux-2.6.32.48/fs/hfsplus/catalog.c linux-2.6.32.48/fs/hfsplus/catalog.c
46865--- linux-2.6.32.48/fs/hfsplus/catalog.c 2011-11-08 19:02:43.000000000 -0500
46866+++ linux-2.6.32.48/fs/hfsplus/catalog.c 2011-11-15 19:59:43.000000000 -0500
46867@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
46868 int err;
46869 u16 type;
46870
46871+ pax_track_stack();
46872+
46873 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
46874 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
46875 if (err)
46876@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
46877 int entry_size;
46878 int err;
46879
46880+ pax_track_stack();
46881+
46882 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
46883 sb = dir->i_sb;
46884 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
46885@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
46886 int entry_size, type;
46887 int err = 0;
46888
46889+ pax_track_stack();
46890+
46891 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
46892 dst_dir->i_ino, dst_name->name);
46893 sb = src_dir->i_sb;
46894diff -urNp linux-2.6.32.48/fs/hfsplus/dir.c linux-2.6.32.48/fs/hfsplus/dir.c
46895--- linux-2.6.32.48/fs/hfsplus/dir.c 2011-11-08 19:02:43.000000000 -0500
46896+++ linux-2.6.32.48/fs/hfsplus/dir.c 2011-11-15 19:59:43.000000000 -0500
46897@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
46898 struct hfsplus_readdir_data *rd;
46899 u16 type;
46900
46901+ pax_track_stack();
46902+
46903 if (filp->f_pos >= inode->i_size)
46904 return 0;
46905
46906diff -urNp linux-2.6.32.48/fs/hfsplus/inode.c linux-2.6.32.48/fs/hfsplus/inode.c
46907--- linux-2.6.32.48/fs/hfsplus/inode.c 2011-11-08 19:02:43.000000000 -0500
46908+++ linux-2.6.32.48/fs/hfsplus/inode.c 2011-11-15 19:59:43.000000000 -0500
46909@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
46910 int res = 0;
46911 u16 type;
46912
46913+ pax_track_stack();
46914+
46915 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
46916
46917 HFSPLUS_I(inode).dev = 0;
46918@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
46919 struct hfs_find_data fd;
46920 hfsplus_cat_entry entry;
46921
46922+ pax_track_stack();
46923+
46924 if (HFSPLUS_IS_RSRC(inode))
46925 main_inode = HFSPLUS_I(inode).rsrc_inode;
46926
46927diff -urNp linux-2.6.32.48/fs/hfsplus/ioctl.c linux-2.6.32.48/fs/hfsplus/ioctl.c
46928--- linux-2.6.32.48/fs/hfsplus/ioctl.c 2011-11-08 19:02:43.000000000 -0500
46929+++ linux-2.6.32.48/fs/hfsplus/ioctl.c 2011-11-15 19:59:43.000000000 -0500
46930@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
46931 struct hfsplus_cat_file *file;
46932 int res;
46933
46934+ pax_track_stack();
46935+
46936 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46937 return -EOPNOTSUPP;
46938
46939@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
46940 struct hfsplus_cat_file *file;
46941 ssize_t res = 0;
46942
46943+ pax_track_stack();
46944+
46945 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46946 return -EOPNOTSUPP;
46947
46948diff -urNp linux-2.6.32.48/fs/hfsplus/super.c linux-2.6.32.48/fs/hfsplus/super.c
46949--- linux-2.6.32.48/fs/hfsplus/super.c 2011-11-08 19:02:43.000000000 -0500
46950+++ linux-2.6.32.48/fs/hfsplus/super.c 2011-11-15 19:59:43.000000000 -0500
46951@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
46952 struct nls_table *nls = NULL;
46953 int err = -EINVAL;
46954
46955+ pax_track_stack();
46956+
46957 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46958 if (!sbi)
46959 return -ENOMEM;
46960diff -urNp linux-2.6.32.48/fs/hugetlbfs/inode.c linux-2.6.32.48/fs/hugetlbfs/inode.c
46961--- linux-2.6.32.48/fs/hugetlbfs/inode.c 2011-11-08 19:02:43.000000000 -0500
46962+++ linux-2.6.32.48/fs/hugetlbfs/inode.c 2011-11-15 19:59:43.000000000 -0500
46963@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
46964 .kill_sb = kill_litter_super,
46965 };
46966
46967-static struct vfsmount *hugetlbfs_vfsmount;
46968+struct vfsmount *hugetlbfs_vfsmount;
46969
46970 static int can_do_hugetlb_shm(void)
46971 {
46972diff -urNp linux-2.6.32.48/fs/ioctl.c linux-2.6.32.48/fs/ioctl.c
46973--- linux-2.6.32.48/fs/ioctl.c 2011-11-08 19:02:43.000000000 -0500
46974+++ linux-2.6.32.48/fs/ioctl.c 2011-11-15 19:59:43.000000000 -0500
46975@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
46976 u64 phys, u64 len, u32 flags)
46977 {
46978 struct fiemap_extent extent;
46979- struct fiemap_extent *dest = fieinfo->fi_extents_start;
46980+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
46981
46982 /* only count the extents */
46983 if (fieinfo->fi_extents_max == 0) {
46984@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
46985
46986 fieinfo.fi_flags = fiemap.fm_flags;
46987 fieinfo.fi_extents_max = fiemap.fm_extent_count;
46988- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
46989+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
46990
46991 if (fiemap.fm_extent_count != 0 &&
46992 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
46993@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
46994 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
46995 fiemap.fm_flags = fieinfo.fi_flags;
46996 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
46997- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
46998+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
46999 error = -EFAULT;
47000
47001 return error;
47002diff -urNp linux-2.6.32.48/fs/jbd/checkpoint.c linux-2.6.32.48/fs/jbd/checkpoint.c
47003--- linux-2.6.32.48/fs/jbd/checkpoint.c 2011-11-08 19:02:43.000000000 -0500
47004+++ linux-2.6.32.48/fs/jbd/checkpoint.c 2011-11-15 19:59:43.000000000 -0500
47005@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
47006 tid_t this_tid;
47007 int result;
47008
47009+ pax_track_stack();
47010+
47011 jbd_debug(1, "Start checkpoint\n");
47012
47013 /*
47014diff -urNp linux-2.6.32.48/fs/jffs2/compr_rtime.c linux-2.6.32.48/fs/jffs2/compr_rtime.c
47015--- linux-2.6.32.48/fs/jffs2/compr_rtime.c 2011-11-08 19:02:43.000000000 -0500
47016+++ linux-2.6.32.48/fs/jffs2/compr_rtime.c 2011-11-15 19:59:43.000000000 -0500
47017@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
47018 int outpos = 0;
47019 int pos=0;
47020
47021+ pax_track_stack();
47022+
47023 memset(positions,0,sizeof(positions));
47024
47025 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
47026@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
47027 int outpos = 0;
47028 int pos=0;
47029
47030+ pax_track_stack();
47031+
47032 memset(positions,0,sizeof(positions));
47033
47034 while (outpos<destlen) {
47035diff -urNp linux-2.6.32.48/fs/jffs2/compr_rubin.c linux-2.6.32.48/fs/jffs2/compr_rubin.c
47036--- linux-2.6.32.48/fs/jffs2/compr_rubin.c 2011-11-08 19:02:43.000000000 -0500
47037+++ linux-2.6.32.48/fs/jffs2/compr_rubin.c 2011-11-15 19:59:43.000000000 -0500
47038@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
47039 int ret;
47040 uint32_t mysrclen, mydstlen;
47041
47042+ pax_track_stack();
47043+
47044 mysrclen = *sourcelen;
47045 mydstlen = *dstlen - 8;
47046
47047diff -urNp linux-2.6.32.48/fs/jffs2/erase.c linux-2.6.32.48/fs/jffs2/erase.c
47048--- linux-2.6.32.48/fs/jffs2/erase.c 2011-11-08 19:02:43.000000000 -0500
47049+++ linux-2.6.32.48/fs/jffs2/erase.c 2011-11-15 19:59:43.000000000 -0500
47050@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
47051 struct jffs2_unknown_node marker = {
47052 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47053 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47054- .totlen = cpu_to_je32(c->cleanmarker_size)
47055+ .totlen = cpu_to_je32(c->cleanmarker_size),
47056+ .hdr_crc = cpu_to_je32(0)
47057 };
47058
47059 jffs2_prealloc_raw_node_refs(c, jeb, 1);
47060diff -urNp linux-2.6.32.48/fs/jffs2/wbuf.c linux-2.6.32.48/fs/jffs2/wbuf.c
47061--- linux-2.6.32.48/fs/jffs2/wbuf.c 2011-11-08 19:02:43.000000000 -0500
47062+++ linux-2.6.32.48/fs/jffs2/wbuf.c 2011-11-15 19:59:43.000000000 -0500
47063@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
47064 {
47065 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47066 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47067- .totlen = constant_cpu_to_je32(8)
47068+ .totlen = constant_cpu_to_je32(8),
47069+ .hdr_crc = constant_cpu_to_je32(0)
47070 };
47071
47072 /*
47073diff -urNp linux-2.6.32.48/fs/jffs2/xattr.c linux-2.6.32.48/fs/jffs2/xattr.c
47074--- linux-2.6.32.48/fs/jffs2/xattr.c 2011-11-08 19:02:43.000000000 -0500
47075+++ linux-2.6.32.48/fs/jffs2/xattr.c 2011-11-15 19:59:43.000000000 -0500
47076@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
47077
47078 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
47079
47080+ pax_track_stack();
47081+
47082 /* Phase.1 : Merge same xref */
47083 for (i=0; i < XREF_TMPHASH_SIZE; i++)
47084 xref_tmphash[i] = NULL;
47085diff -urNp linux-2.6.32.48/fs/jfs/super.c linux-2.6.32.48/fs/jfs/super.c
47086--- linux-2.6.32.48/fs/jfs/super.c 2011-11-08 19:02:43.000000000 -0500
47087+++ linux-2.6.32.48/fs/jfs/super.c 2011-11-15 19:59:43.000000000 -0500
47088@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
47089
47090 jfs_inode_cachep =
47091 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47092- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47093+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47094 init_once);
47095 if (jfs_inode_cachep == NULL)
47096 return -ENOMEM;
47097diff -urNp linux-2.6.32.48/fs/Kconfig.binfmt linux-2.6.32.48/fs/Kconfig.binfmt
47098--- linux-2.6.32.48/fs/Kconfig.binfmt 2011-11-08 19:02:43.000000000 -0500
47099+++ linux-2.6.32.48/fs/Kconfig.binfmt 2011-11-15 19:59:43.000000000 -0500
47100@@ -86,7 +86,7 @@ config HAVE_AOUT
47101
47102 config BINFMT_AOUT
47103 tristate "Kernel support for a.out and ECOFF binaries"
47104- depends on HAVE_AOUT
47105+ depends on HAVE_AOUT && BROKEN
47106 ---help---
47107 A.out (Assembler.OUTput) is a set of formats for libraries and
47108 executables used in the earliest versions of UNIX. Linux used
47109diff -urNp linux-2.6.32.48/fs/libfs.c linux-2.6.32.48/fs/libfs.c
47110--- linux-2.6.32.48/fs/libfs.c 2011-11-08 19:02:43.000000000 -0500
47111+++ linux-2.6.32.48/fs/libfs.c 2011-11-15 19:59:43.000000000 -0500
47112@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
47113
47114 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47115 struct dentry *next;
47116+ char d_name[sizeof(next->d_iname)];
47117+ const unsigned char *name;
47118+
47119 next = list_entry(p, struct dentry, d_u.d_child);
47120 if (d_unhashed(next) || !next->d_inode)
47121 continue;
47122
47123 spin_unlock(&dcache_lock);
47124- if (filldir(dirent, next->d_name.name,
47125+ name = next->d_name.name;
47126+ if (name == next->d_iname) {
47127+ memcpy(d_name, name, next->d_name.len);
47128+ name = d_name;
47129+ }
47130+ if (filldir(dirent, name,
47131 next->d_name.len, filp->f_pos,
47132 next->d_inode->i_ino,
47133 dt_type(next->d_inode)) < 0)
47134diff -urNp linux-2.6.32.48/fs/lockd/clntproc.c linux-2.6.32.48/fs/lockd/clntproc.c
47135--- linux-2.6.32.48/fs/lockd/clntproc.c 2011-11-08 19:02:43.000000000 -0500
47136+++ linux-2.6.32.48/fs/lockd/clntproc.c 2011-11-15 19:59:43.000000000 -0500
47137@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
47138 /*
47139 * Cookie counter for NLM requests
47140 */
47141-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47142+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47143
47144 void nlmclnt_next_cookie(struct nlm_cookie *c)
47145 {
47146- u32 cookie = atomic_inc_return(&nlm_cookie);
47147+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47148
47149 memcpy(c->data, &cookie, 4);
47150 c->len=4;
47151@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
47152 struct nlm_rqst reqst, *req;
47153 int status;
47154
47155+ pax_track_stack();
47156+
47157 req = &reqst;
47158 memset(req, 0, sizeof(*req));
47159 locks_init_lock(&req->a_args.lock.fl);
47160diff -urNp linux-2.6.32.48/fs/lockd/svc.c linux-2.6.32.48/fs/lockd/svc.c
47161--- linux-2.6.32.48/fs/lockd/svc.c 2011-11-08 19:02:43.000000000 -0500
47162+++ linux-2.6.32.48/fs/lockd/svc.c 2011-11-15 19:59:43.000000000 -0500
47163@@ -43,7 +43,7 @@
47164
47165 static struct svc_program nlmsvc_program;
47166
47167-struct nlmsvc_binding * nlmsvc_ops;
47168+const struct nlmsvc_binding * nlmsvc_ops;
47169 EXPORT_SYMBOL_GPL(nlmsvc_ops);
47170
47171 static DEFINE_MUTEX(nlmsvc_mutex);
47172diff -urNp linux-2.6.32.48/fs/locks.c linux-2.6.32.48/fs/locks.c
47173--- linux-2.6.32.48/fs/locks.c 2011-11-08 19:02:43.000000000 -0500
47174+++ linux-2.6.32.48/fs/locks.c 2011-11-15 19:59:43.000000000 -0500
47175@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
47176
47177 static struct kmem_cache *filelock_cache __read_mostly;
47178
47179+static void locks_init_lock_always(struct file_lock *fl)
47180+{
47181+ fl->fl_next = NULL;
47182+ fl->fl_fasync = NULL;
47183+ fl->fl_owner = NULL;
47184+ fl->fl_pid = 0;
47185+ fl->fl_nspid = NULL;
47186+ fl->fl_file = NULL;
47187+ fl->fl_flags = 0;
47188+ fl->fl_type = 0;
47189+ fl->fl_start = fl->fl_end = 0;
47190+}
47191+
47192 /* Allocate an empty lock structure. */
47193 static struct file_lock *locks_alloc_lock(void)
47194 {
47195- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
47196+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
47197+
47198+ if (fl)
47199+ locks_init_lock_always(fl);
47200+
47201+ return fl;
47202 }
47203
47204 void locks_release_private(struct file_lock *fl)
47205@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
47206 INIT_LIST_HEAD(&fl->fl_link);
47207 INIT_LIST_HEAD(&fl->fl_block);
47208 init_waitqueue_head(&fl->fl_wait);
47209- fl->fl_next = NULL;
47210- fl->fl_fasync = NULL;
47211- fl->fl_owner = NULL;
47212- fl->fl_pid = 0;
47213- fl->fl_nspid = NULL;
47214- fl->fl_file = NULL;
47215- fl->fl_flags = 0;
47216- fl->fl_type = 0;
47217- fl->fl_start = fl->fl_end = 0;
47218 fl->fl_ops = NULL;
47219 fl->fl_lmops = NULL;
47220+ locks_init_lock_always(fl);
47221 }
47222
47223 EXPORT_SYMBOL(locks_init_lock);
47224@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
47225 return;
47226
47227 if (filp->f_op && filp->f_op->flock) {
47228- struct file_lock fl = {
47229+ struct file_lock flock = {
47230 .fl_pid = current->tgid,
47231 .fl_file = filp,
47232 .fl_flags = FL_FLOCK,
47233 .fl_type = F_UNLCK,
47234 .fl_end = OFFSET_MAX,
47235 };
47236- filp->f_op->flock(filp, F_SETLKW, &fl);
47237- if (fl.fl_ops && fl.fl_ops->fl_release_private)
47238- fl.fl_ops->fl_release_private(&fl);
47239+ filp->f_op->flock(filp, F_SETLKW, &flock);
47240+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
47241+ flock.fl_ops->fl_release_private(&flock);
47242 }
47243
47244 lock_kernel();
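The fs/locks.c hunks above factor the pointer- and flag-clearing out of locks_init_lock() into a locks_init_lock_always() helper and call it from locks_alloc_lock(), so a lock freshly taken from the slab cache never carries stale field values. Below is a minimal userspace C sketch of that allocate-then-always-initialize pattern; the struct, field names and malloc() are simplified stand-ins for the kernel's struct file_lock and kmem_cache_alloc(), not part of the patch itself.

#include <stdlib.h>

/* Simplified stand-in for struct file_lock; only a few fields. */
struct lock_sketch {
	struct lock_sketch *next;   /* fl_next  */
	void *owner;                /* fl_owner */
	int flags;                  /* fl_flags */
	long start, end;            /* fl_start / fl_end */
};

/* Mirrors locks_init_lock_always(): clear the fields that must never
 * be seen with stale values, even right after allocation. */
static void lock_init_always(struct lock_sketch *fl)
{
	fl->next = NULL;
	fl->owner = NULL;
	fl->flags = 0;
	fl->start = fl->end = 0;
}

/* Mirrors the patched locks_alloc_lock(): allocate, then always run
 * the minimal initializer before handing the object out. */
static struct lock_sketch *lock_alloc(void)
{
	struct lock_sketch *fl = malloc(sizeof(*fl));

	if (fl)
		lock_init_always(fl);
	return fl;
}

int main(void)
{
	free(lock_alloc());
	return 0;
}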
47245diff -urNp linux-2.6.32.48/fs/mbcache.c linux-2.6.32.48/fs/mbcache.c
47246--- linux-2.6.32.48/fs/mbcache.c 2011-11-08 19:02:43.000000000 -0500
47247+++ linux-2.6.32.48/fs/mbcache.c 2011-11-15 19:59:43.000000000 -0500
47248@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
47249 if (!cache)
47250 goto fail;
47251 cache->c_name = name;
47252- cache->c_op.free = NULL;
47253+ *(void **)&cache->c_op.free = NULL;
47254 if (cache_op)
47255- cache->c_op.free = cache_op->free;
47256+ *(void **)&cache->c_op.free = cache_op->free;
47257 atomic_set(&cache->c_entry_count, 0);
47258 cache->c_bucket_bits = bucket_bits;
47259 #ifdef MB_CACHE_INDEXES_COUNT
47260diff -urNp linux-2.6.32.48/fs/namei.c linux-2.6.32.48/fs/namei.c
47261--- linux-2.6.32.48/fs/namei.c 2011-11-08 19:02:43.000000000 -0500
47262+++ linux-2.6.32.48/fs/namei.c 2011-11-16 17:53:55.000000000 -0500
47263@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
47264 return ret;
47265
47266 /*
47267- * Read/write DACs are always overridable.
47268- * Executable DACs are overridable if at least one exec bit is set.
47269- */
47270- if (!(mask & MAY_EXEC) || execute_ok(inode))
47271- if (capable(CAP_DAC_OVERRIDE))
47272- return 0;
47273-
47274- /*
47275 * Searching includes executable on directories, else just read.
47276 */
47277 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47278@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
47279 if (capable(CAP_DAC_READ_SEARCH))
47280 return 0;
47281
47282+ /*
47283+ * Read/write DACs are always overridable.
47284+ * Executable DACs are overridable if at least one exec bit is set.
47285+ */
47286+ if (!(mask & MAY_EXEC) || execute_ok(inode))
47287+ if (capable(CAP_DAC_OVERRIDE))
47288+ return 0;
47289+
47290 return -EACCES;
47291 }
47292
47293@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
47294 if (!ret)
47295 goto ok;
47296
47297- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
47298+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
47299+ capable(CAP_DAC_OVERRIDE))
47300 goto ok;
47301
47302 return ret;
47303@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
47304 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
47305 error = PTR_ERR(cookie);
47306 if (!IS_ERR(cookie)) {
47307- char *s = nd_get_link(nd);
47308+ const char *s = nd_get_link(nd);
47309 error = 0;
47310 if (s)
47311 error = __vfs_follow_link(nd, s);
47312@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
47313 err = security_inode_follow_link(path->dentry, nd);
47314 if (err)
47315 goto loop;
47316+
47317+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
47318+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
47319+ err = -EACCES;
47320+ goto loop;
47321+ }
47322+
47323 current->link_count++;
47324 current->total_link_count++;
47325 nd->depth++;
47326@@ -1016,11 +1024,19 @@ return_reval:
47327 break;
47328 }
47329 return_base:
47330+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
47331+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47332+ path_put(&nd->path);
47333+ return -ENOENT;
47334+ }
47335 return 0;
47336 out_dput:
47337 path_put_conditional(&next, nd);
47338 break;
47339 }
47340+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
47341+ err = -ENOENT;
47342+
47343 path_put(&nd->path);
47344 return_err:
47345 return err;
47346@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const
47347 int retval = path_init(dfd, name, flags, nd);
47348 if (!retval)
47349 retval = path_walk(name, nd);
47350- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
47351- nd->path.dentry->d_inode))
47352- audit_inode(name, nd->path.dentry);
47353+
47354+ if (likely(!retval)) {
47355+ if (nd->path.dentry && nd->path.dentry->d_inode) {
47356+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47357+ retval = -ENOENT;
47358+ if (!audit_dummy_context())
47359+ audit_inode(name, nd->path.dentry);
47360+ }
47361+ }
47362 if (nd->root.mnt) {
47363 path_put(&nd->root);
47364 nd->root.mnt = NULL;
47365 }
47366+
47367 return retval;
47368 }
47369
47370@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_
47371 if (error)
47372 goto err_out;
47373
47374+
47375+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
47376+ error = -EPERM;
47377+ goto err_out;
47378+ }
47379+ if (gr_handle_rawio(inode)) {
47380+ error = -EPERM;
47381+ goto err_out;
47382+ }
47383+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
47384+ error = -EACCES;
47385+ goto err_out;
47386+ }
47387+
47388 if (flag & O_TRUNC) {
47389 error = get_write_access(inode);
47390 if (error)
47391@@ -1621,12 +1658,19 @@ static int __open_namei_create(struct na
47392 int error;
47393 struct dentry *dir = nd->path.dentry;
47394
47395+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, mode)) {
47396+ error = -EACCES;
47397+ goto out_unlock;
47398+ }
47399+
47400 if (!IS_POSIXACL(dir->d_inode))
47401 mode &= ~current_umask();
47402 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
47403 if (error)
47404 goto out_unlock;
47405 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
47406+ if (!error)
47407+ gr_handle_create(path->dentry, nd->path.mnt);
47408 out_unlock:
47409 mutex_unlock(&dir->d_inode->i_mutex);
47410 dput(nd->path.dentry);
47411@@ -1709,6 +1753,22 @@ struct file *do_filp_open(int dfd, const
47412 &nd, flag);
47413 if (error)
47414 return ERR_PTR(error);
47415+
47416+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
47417+ error = -EPERM;
47418+ goto exit;
47419+ }
47420+
47421+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
47422+ error = -EPERM;
47423+ goto exit;
47424+ }
47425+
47426+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
47427+ error = -EACCES;
47428+ goto exit;
47429+ }
47430+
47431 goto ok;
47432 }
47433
47434@@ -1795,6 +1855,19 @@ do_last:
47435 /*
47436 * It already exists.
47437 */
47438+
47439+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
47440+ error = -ENOENT;
47441+ goto exit_mutex_unlock;
47442+ }
47443+
47444+ /* only check if O_CREAT is specified, all other checks need
47445+ to go into may_open */
47446+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
47447+ error = -EACCES;
47448+ goto exit_mutex_unlock;
47449+ }
47450+
47451 mutex_unlock(&dir->d_inode->i_mutex);
47452 audit_inode(pathname, path.dentry);
47453
47454@@ -1887,6 +1960,13 @@ do_link:
47455 error = security_inode_follow_link(path.dentry, &nd);
47456 if (error)
47457 goto exit_dput;
47458+
47459+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
47460+ path.dentry, nd.path.mnt)) {
47461+ error = -EACCES;
47462+ goto exit_dput;
47463+ }
47464+
47465 error = __do_follow_link(&path, &nd);
47466 if (error) {
47467 /* Does someone understand code flow here? Or it is only
47468@@ -1984,6 +2064,10 @@ struct dentry *lookup_create(struct name
47469 }
47470 return dentry;
47471 eexist:
47472+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47473+ dput(dentry);
47474+ return ERR_PTR(-ENOENT);
47475+ }
47476 dput(dentry);
47477 dentry = ERR_PTR(-EEXIST);
47478 fail:
47479@@ -2061,6 +2145,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
47480 error = may_mknod(mode);
47481 if (error)
47482 goto out_dput;
47483+
47484+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
47485+ error = -EPERM;
47486+ goto out_dput;
47487+ }
47488+
47489+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
47490+ error = -EACCES;
47491+ goto out_dput;
47492+ }
47493+
47494 error = mnt_want_write(nd.path.mnt);
47495 if (error)
47496 goto out_dput;
47497@@ -2081,6 +2176,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
47498 }
47499 out_drop_write:
47500 mnt_drop_write(nd.path.mnt);
47501+
47502+ if (!error)
47503+ gr_handle_create(dentry, nd.path.mnt);
47504 out_dput:
47505 dput(dentry);
47506 out_unlock:
47507@@ -2134,6 +2232,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
47508 if (IS_ERR(dentry))
47509 goto out_unlock;
47510
47511+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
47512+ error = -EACCES;
47513+ goto out_dput;
47514+ }
47515+
47516 if (!IS_POSIXACL(nd.path.dentry->d_inode))
47517 mode &= ~current_umask();
47518 error = mnt_want_write(nd.path.mnt);
47519@@ -2145,6 +2248,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
47520 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
47521 out_drop_write:
47522 mnt_drop_write(nd.path.mnt);
47523+
47524+ if (!error)
47525+ gr_handle_create(dentry, nd.path.mnt);
47526+
47527 out_dput:
47528 dput(dentry);
47529 out_unlock:
47530@@ -2226,6 +2333,8 @@ static long do_rmdir(int dfd, const char
47531 char * name;
47532 struct dentry *dentry;
47533 struct nameidata nd;
47534+ ino_t saved_ino = 0;
47535+ dev_t saved_dev = 0;
47536
47537 error = user_path_parent(dfd, pathname, &nd, &name);
47538 if (error)
47539@@ -2250,6 +2359,17 @@ static long do_rmdir(int dfd, const char
47540 error = PTR_ERR(dentry);
47541 if (IS_ERR(dentry))
47542 goto exit2;
47543+
47544+ if (dentry->d_inode != NULL) {
47545+ saved_ino = dentry->d_inode->i_ino;
47546+ saved_dev = gr_get_dev_from_dentry(dentry);
47547+
47548+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47549+ error = -EACCES;
47550+ goto exit3;
47551+ }
47552+ }
47553+
47554 error = mnt_want_write(nd.path.mnt);
47555 if (error)
47556 goto exit3;
47557@@ -2257,6 +2377,8 @@ static long do_rmdir(int dfd, const char
47558 if (error)
47559 goto exit4;
47560 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47561+ if (!error && (saved_dev || saved_ino))
47562+ gr_handle_delete(saved_ino, saved_dev);
47563 exit4:
47564 mnt_drop_write(nd.path.mnt);
47565 exit3:
47566@@ -2318,6 +2440,8 @@ static long do_unlinkat(int dfd, const c
47567 struct dentry *dentry;
47568 struct nameidata nd;
47569 struct inode *inode = NULL;
47570+ ino_t saved_ino = 0;
47571+ dev_t saved_dev = 0;
47572
47573 error = user_path_parent(dfd, pathname, &nd, &name);
47574 if (error)
47575@@ -2337,8 +2461,19 @@ static long do_unlinkat(int dfd, const c
47576 if (nd.last.name[nd.last.len])
47577 goto slashes;
47578 inode = dentry->d_inode;
47579- if (inode)
47580+ if (inode) {
47581+ if (inode->i_nlink <= 1) {
47582+ saved_ino = inode->i_ino;
47583+ saved_dev = gr_get_dev_from_dentry(dentry);
47584+ }
47585+
47586 atomic_inc(&inode->i_count);
47587+
47588+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47589+ error = -EACCES;
47590+ goto exit2;
47591+ }
47592+ }
47593 error = mnt_want_write(nd.path.mnt);
47594 if (error)
47595 goto exit2;
47596@@ -2346,6 +2481,8 @@ static long do_unlinkat(int dfd, const c
47597 if (error)
47598 goto exit3;
47599 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47600+ if (!error && (saved_ino || saved_dev))
47601+ gr_handle_delete(saved_ino, saved_dev);
47602 exit3:
47603 mnt_drop_write(nd.path.mnt);
47604 exit2:
47605@@ -2424,6 +2561,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
47606 if (IS_ERR(dentry))
47607 goto out_unlock;
47608
47609+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
47610+ error = -EACCES;
47611+ goto out_dput;
47612+ }
47613+
47614 error = mnt_want_write(nd.path.mnt);
47615 if (error)
47616 goto out_dput;
47617@@ -2431,6 +2573,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
47618 if (error)
47619 goto out_drop_write;
47620 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
47621+ if (!error)
47622+ gr_handle_create(dentry, nd.path.mnt);
47623 out_drop_write:
47624 mnt_drop_write(nd.path.mnt);
47625 out_dput:
47626@@ -2524,6 +2668,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
47627 error = PTR_ERR(new_dentry);
47628 if (IS_ERR(new_dentry))
47629 goto out_unlock;
47630+
47631+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47632+ old_path.dentry->d_inode,
47633+ old_path.dentry->d_inode->i_mode, to)) {
47634+ error = -EACCES;
47635+ goto out_dput;
47636+ }
47637+
47638+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
47639+ old_path.dentry, old_path.mnt, to)) {
47640+ error = -EACCES;
47641+ goto out_dput;
47642+ }
47643+
47644 error = mnt_want_write(nd.path.mnt);
47645 if (error)
47646 goto out_dput;
47647@@ -2531,6 +2689,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
47648 if (error)
47649 goto out_drop_write;
47650 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
47651+ if (!error)
47652+ gr_handle_create(new_dentry, nd.path.mnt);
47653 out_drop_write:
47654 mnt_drop_write(nd.path.mnt);
47655 out_dput:
47656@@ -2708,6 +2868,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
47657 char *to;
47658 int error;
47659
47660+ pax_track_stack();
47661+
47662 error = user_path_parent(olddfd, oldname, &oldnd, &from);
47663 if (error)
47664 goto exit;
47665@@ -2764,6 +2926,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
47666 if (new_dentry == trap)
47667 goto exit5;
47668
47669+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47670+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47671+ to);
47672+ if (error)
47673+ goto exit5;
47674+
47675 error = mnt_want_write(oldnd.path.mnt);
47676 if (error)
47677 goto exit5;
47678@@ -2773,6 +2941,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
47679 goto exit6;
47680 error = vfs_rename(old_dir->d_inode, old_dentry,
47681 new_dir->d_inode, new_dentry);
47682+ if (!error)
47683+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47684+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47685 exit6:
47686 mnt_drop_write(oldnd.path.mnt);
47687 exit5:
47688@@ -2798,6 +2969,8 @@ SYSCALL_DEFINE2(rename, const char __use
47689
47690 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47691 {
47692+ char tmpbuf[64];
47693+ const char *newlink;
47694 int len;
47695
47696 len = PTR_ERR(link);
47697@@ -2807,7 +2980,14 @@ int vfs_readlink(struct dentry *dentry,
47698 len = strlen(link);
47699 if (len > (unsigned) buflen)
47700 len = buflen;
47701- if (copy_to_user(buffer, link, len))
47702+
47703+ if (len < sizeof(tmpbuf)) {
47704+ memcpy(tmpbuf, link, len);
47705+ newlink = tmpbuf;
47706+ } else
47707+ newlink = link;
47708+
47709+ if (copy_to_user(buffer, newlink, len))
47710 len = -EFAULT;
47711 out:
47712 return len;
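The last fs/namei.c hunk above changes vfs_readlink() to bounce short link targets through a 64-byte stack buffer before copy_to_user(), so the user copy no longer reads straight out of the original link buffer. The following is a minimal userspace sketch of that bounce-buffer pattern, with memcpy() standing in for copy_to_user() and illustrative names and sizes.

#include <string.h>

/* Stand-in for copy_to_user(); in userspace just a memcpy(). */
static int copy_out(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Mirrors the patched vfs_readlink() tail: short targets are copied
 * into tmpbuf first, longer ones fall back to the original pointer. */
static int readlink_sketch(char *user_buf, unsigned buflen, const char *link)
{
	char tmpbuf[64];
	const char *newlink;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;

	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		newlink = tmpbuf;
	} else {
		newlink = link;
	}

	return copy_out(user_buf, newlink, len);
}

int main(void)
{
	char out[64];
	return readlink_sketch(out, sizeof(out), "/tmp/example-target");
}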
47713diff -urNp linux-2.6.32.48/fs/namespace.c linux-2.6.32.48/fs/namespace.c
47714--- linux-2.6.32.48/fs/namespace.c 2011-11-08 19:02:43.000000000 -0500
47715+++ linux-2.6.32.48/fs/namespace.c 2011-11-15 19:59:43.000000000 -0500
47716@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
47717 if (!(sb->s_flags & MS_RDONLY))
47718 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47719 up_write(&sb->s_umount);
47720+
47721+ gr_log_remount(mnt->mnt_devname, retval);
47722+
47723 return retval;
47724 }
47725
47726@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
47727 security_sb_umount_busy(mnt);
47728 up_write(&namespace_sem);
47729 release_mounts(&umount_list);
47730+
47731+ gr_log_unmount(mnt->mnt_devname, retval);
47732+
47733 return retval;
47734 }
47735
47736@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
47737 if (retval)
47738 goto dput_out;
47739
47740+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47741+ retval = -EPERM;
47742+ goto dput_out;
47743+ }
47744+
47745+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47746+ retval = -EPERM;
47747+ goto dput_out;
47748+ }
47749+
47750 if (flags & MS_REMOUNT)
47751 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47752 data_page);
47753@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
47754 dev_name, data_page);
47755 dput_out:
47756 path_put(&path);
47757+
47758+ gr_log_mount(dev_name, dir_name, retval);
47759+
47760 return retval;
47761 }
47762
47763@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
47764 goto out1;
47765 }
47766
47767+ if (gr_handle_chroot_pivot()) {
47768+ error = -EPERM;
47769+ path_put(&old);
47770+ goto out1;
47771+ }
47772+
47773 read_lock(&current->fs->lock);
47774 root = current->fs->root;
47775 path_get(&current->fs->root);
47776diff -urNp linux-2.6.32.48/fs/ncpfs/dir.c linux-2.6.32.48/fs/ncpfs/dir.c
47777--- linux-2.6.32.48/fs/ncpfs/dir.c 2011-11-08 19:02:43.000000000 -0500
47778+++ linux-2.6.32.48/fs/ncpfs/dir.c 2011-11-15 19:59:43.000000000 -0500
47779@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
47780 int res, val = 0, len;
47781 __u8 __name[NCP_MAXPATHLEN + 1];
47782
47783+ pax_track_stack();
47784+
47785 parent = dget_parent(dentry);
47786 dir = parent->d_inode;
47787
47788@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
47789 int error, res, len;
47790 __u8 __name[NCP_MAXPATHLEN + 1];
47791
47792+ pax_track_stack();
47793+
47794 lock_kernel();
47795 error = -EIO;
47796 if (!ncp_conn_valid(server))
47797@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
47798 int error, result, len;
47799 int opmode;
47800 __u8 __name[NCP_MAXPATHLEN + 1];
47801-
47802+
47803 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
47804 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
47805
47806+ pax_track_stack();
47807+
47808 error = -EIO;
47809 lock_kernel();
47810 if (!ncp_conn_valid(server))
47811@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
47812 int error, len;
47813 __u8 __name[NCP_MAXPATHLEN + 1];
47814
47815+ pax_track_stack();
47816+
47817 DPRINTK("ncp_mkdir: making %s/%s\n",
47818 dentry->d_parent->d_name.name, dentry->d_name.name);
47819
47820@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
47821 if (!ncp_conn_valid(server))
47822 goto out;
47823
47824+ pax_track_stack();
47825+
47826 ncp_age_dentry(server, dentry);
47827 len = sizeof(__name);
47828 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
47829@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
47830 int old_len, new_len;
47831 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
47832
47833+ pax_track_stack();
47834+
47835 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47836 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47837 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
47838diff -urNp linux-2.6.32.48/fs/ncpfs/inode.c linux-2.6.32.48/fs/ncpfs/inode.c
47839--- linux-2.6.32.48/fs/ncpfs/inode.c 2011-11-08 19:02:43.000000000 -0500
47840+++ linux-2.6.32.48/fs/ncpfs/inode.c 2011-11-15 19:59:43.000000000 -0500
47841@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
47842 #endif
47843 struct ncp_entry_info finfo;
47844
47845+ pax_track_stack();
47846+
47847 data.wdog_pid = NULL;
47848 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47849 if (!server)
47850diff -urNp linux-2.6.32.48/fs/nfs/inode.c linux-2.6.32.48/fs/nfs/inode.c
47851--- linux-2.6.32.48/fs/nfs/inode.c 2011-11-08 19:02:43.000000000 -0500
47852+++ linux-2.6.32.48/fs/nfs/inode.c 2011-11-15 19:59:43.000000000 -0500
47853@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
47854 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47855 nfsi->attrtimeo_timestamp = jiffies;
47856
47857- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47858+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47859 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47860 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47861 else
47862@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
47863 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47864 }
47865
47866-static atomic_long_t nfs_attr_generation_counter;
47867+static atomic_long_unchecked_t nfs_attr_generation_counter;
47868
47869 static unsigned long nfs_read_attr_generation_counter(void)
47870 {
47871- return atomic_long_read(&nfs_attr_generation_counter);
47872+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47873 }
47874
47875 unsigned long nfs_inc_attr_generation_counter(void)
47876 {
47877- return atomic_long_inc_return(&nfs_attr_generation_counter);
47878+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47879 }
47880
47881 void nfs_fattr_init(struct nfs_fattr *fattr)
47882diff -urNp linux-2.6.32.48/fs/nfsd/lockd.c linux-2.6.32.48/fs/nfsd/lockd.c
47883--- linux-2.6.32.48/fs/nfsd/lockd.c 2011-11-08 19:02:43.000000000 -0500
47884+++ linux-2.6.32.48/fs/nfsd/lockd.c 2011-11-15 19:59:43.000000000 -0500
47885@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
47886 fput(filp);
47887 }
47888
47889-static struct nlmsvc_binding nfsd_nlm_ops = {
47890+static const struct nlmsvc_binding nfsd_nlm_ops = {
47891 .fopen = nlm_fopen, /* open file for locking */
47892 .fclose = nlm_fclose, /* close file */
47893 };
47894diff -urNp linux-2.6.32.48/fs/nfsd/nfs4state.c linux-2.6.32.48/fs/nfsd/nfs4state.c
47895--- linux-2.6.32.48/fs/nfsd/nfs4state.c 2011-11-08 19:02:43.000000000 -0500
47896+++ linux-2.6.32.48/fs/nfsd/nfs4state.c 2011-11-15 19:59:43.000000000 -0500
47897@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
47898 unsigned int cmd;
47899 int err;
47900
47901+ pax_track_stack();
47902+
47903 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47904 (long long) lock->lk_offset,
47905 (long long) lock->lk_length);
47906diff -urNp linux-2.6.32.48/fs/nfsd/nfs4xdr.c linux-2.6.32.48/fs/nfsd/nfs4xdr.c
47907--- linux-2.6.32.48/fs/nfsd/nfs4xdr.c 2011-11-08 19:02:43.000000000 -0500
47908+++ linux-2.6.32.48/fs/nfsd/nfs4xdr.c 2011-11-15 19:59:43.000000000 -0500
47909@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
47910 struct nfsd4_compoundres *resp = rqstp->rq_resp;
47911 u32 minorversion = resp->cstate.minorversion;
47912
47913+ pax_track_stack();
47914+
47915 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47916 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47917 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
47918diff -urNp linux-2.6.32.48/fs/nfsd/vfs.c linux-2.6.32.48/fs/nfsd/vfs.c
47919--- linux-2.6.32.48/fs/nfsd/vfs.c 2011-11-08 19:02:43.000000000 -0500
47920+++ linux-2.6.32.48/fs/nfsd/vfs.c 2011-11-15 19:59:43.000000000 -0500
47921@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
47922 } else {
47923 oldfs = get_fs();
47924 set_fs(KERNEL_DS);
47925- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47926+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47927 set_fs(oldfs);
47928 }
47929
47930@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
47931
47932 /* Write the data. */
47933 oldfs = get_fs(); set_fs(KERNEL_DS);
47934- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47935+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47936 set_fs(oldfs);
47937 if (host_err < 0)
47938 goto out_nfserr;
47939@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
47940 */
47941
47942 oldfs = get_fs(); set_fs(KERNEL_DS);
47943- host_err = inode->i_op->readlink(dentry, buf, *lenp);
47944+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47945 set_fs(oldfs);
47946
47947 if (host_err < 0)
47948diff -urNp linux-2.6.32.48/fs/nilfs2/ioctl.c linux-2.6.32.48/fs/nilfs2/ioctl.c
47949--- linux-2.6.32.48/fs/nilfs2/ioctl.c 2011-11-08 19:02:43.000000000 -0500
47950+++ linux-2.6.32.48/fs/nilfs2/ioctl.c 2011-11-15 19:59:43.000000000 -0500
47951@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
47952 unsigned int cmd, void __user *argp)
47953 {
47954 struct nilfs_argv argv[5];
47955- const static size_t argsz[5] = {
47956+ static const size_t argsz[5] = {
47957 sizeof(struct nilfs_vdesc),
47958 sizeof(struct nilfs_period),
47959 sizeof(__u64),
47960diff -urNp linux-2.6.32.48/fs/notify/dnotify/dnotify.c linux-2.6.32.48/fs/notify/dnotify/dnotify.c
47961--- linux-2.6.32.48/fs/notify/dnotify/dnotify.c 2011-11-08 19:02:43.000000000 -0500
47962+++ linux-2.6.32.48/fs/notify/dnotify/dnotify.c 2011-11-15 19:59:43.000000000 -0500
47963@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
47964 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
47965 }
47966
47967-static struct fsnotify_ops dnotify_fsnotify_ops = {
47968+static const struct fsnotify_ops dnotify_fsnotify_ops = {
47969 .handle_event = dnotify_handle_event,
47970 .should_send_event = dnotify_should_send_event,
47971 .free_group_priv = NULL,
47972diff -urNp linux-2.6.32.48/fs/notify/notification.c linux-2.6.32.48/fs/notify/notification.c
47973--- linux-2.6.32.48/fs/notify/notification.c 2011-11-08 19:02:43.000000000 -0500
47974+++ linux-2.6.32.48/fs/notify/notification.c 2011-11-15 19:59:43.000000000 -0500
47975@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
47976 * get set to 0 so it will never get 'freed'
47977 */
47978 static struct fsnotify_event q_overflow_event;
47979-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47980+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47981
47982 /**
47983 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47984@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
47985 */
47986 u32 fsnotify_get_cookie(void)
47987 {
47988- return atomic_inc_return(&fsnotify_sync_cookie);
47989+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47990 }
47991 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47992
47993diff -urNp linux-2.6.32.48/fs/ntfs/dir.c linux-2.6.32.48/fs/ntfs/dir.c
47994--- linux-2.6.32.48/fs/ntfs/dir.c 2011-11-08 19:02:43.000000000 -0500
47995+++ linux-2.6.32.48/fs/ntfs/dir.c 2011-11-15 19:59:43.000000000 -0500
47996@@ -1328,7 +1328,7 @@ find_next_index_buffer:
47997 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47998 ~(s64)(ndir->itype.index.block_size - 1)));
47999 /* Bounds checks. */
48000- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48001+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48002 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48003 "inode 0x%lx or driver bug.", vdir->i_ino);
48004 goto err_out;
48005diff -urNp linux-2.6.32.48/fs/ntfs/file.c linux-2.6.32.48/fs/ntfs/file.c
48006--- linux-2.6.32.48/fs/ntfs/file.c 2011-11-08 19:02:43.000000000 -0500
48007+++ linux-2.6.32.48/fs/ntfs/file.c 2011-11-15 19:59:43.000000000 -0500
48008@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
48009 #endif /* NTFS_RW */
48010 };
48011
48012-const struct file_operations ntfs_empty_file_ops = {};
48013+const struct file_operations ntfs_empty_file_ops __read_only;
48014
48015-const struct inode_operations ntfs_empty_inode_ops = {};
48016+const struct inode_operations ntfs_empty_inode_ops __read_only;
48017diff -urNp linux-2.6.32.48/fs/ocfs2/cluster/masklog.c linux-2.6.32.48/fs/ocfs2/cluster/masklog.c
48018--- linux-2.6.32.48/fs/ocfs2/cluster/masklog.c 2011-11-08 19:02:43.000000000 -0500
48019+++ linux-2.6.32.48/fs/ocfs2/cluster/masklog.c 2011-11-15 19:59:43.000000000 -0500
48020@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
48021 return mlog_mask_store(mlog_attr->mask, buf, count);
48022 }
48023
48024-static struct sysfs_ops mlog_attr_ops = {
48025+static const struct sysfs_ops mlog_attr_ops = {
48026 .show = mlog_show,
48027 .store = mlog_store,
48028 };
48029diff -urNp linux-2.6.32.48/fs/ocfs2/localalloc.c linux-2.6.32.48/fs/ocfs2/localalloc.c
48030--- linux-2.6.32.48/fs/ocfs2/localalloc.c 2011-11-08 19:02:43.000000000 -0500
48031+++ linux-2.6.32.48/fs/ocfs2/localalloc.c 2011-11-15 19:59:43.000000000 -0500
48032@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
48033 goto bail;
48034 }
48035
48036- atomic_inc(&osb->alloc_stats.moves);
48037+ atomic_inc_unchecked(&osb->alloc_stats.moves);
48038
48039 status = 0;
48040 bail:
48041diff -urNp linux-2.6.32.48/fs/ocfs2/namei.c linux-2.6.32.48/fs/ocfs2/namei.c
48042--- linux-2.6.32.48/fs/ocfs2/namei.c 2011-11-08 19:02:43.000000000 -0500
48043+++ linux-2.6.32.48/fs/ocfs2/namei.c 2011-11-15 19:59:43.000000000 -0500
48044@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
48045 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
48046 struct ocfs2_dir_lookup_result target_insert = { NULL, };
48047
48048+ pax_track_stack();
48049+
48050 /* At some point it might be nice to break this function up a
48051 * bit. */
48052
48053diff -urNp linux-2.6.32.48/fs/ocfs2/ocfs2.h linux-2.6.32.48/fs/ocfs2/ocfs2.h
48054--- linux-2.6.32.48/fs/ocfs2/ocfs2.h 2011-11-08 19:02:43.000000000 -0500
48055+++ linux-2.6.32.48/fs/ocfs2/ocfs2.h 2011-11-15 19:59:43.000000000 -0500
48056@@ -217,11 +217,11 @@ enum ocfs2_vol_state
48057
48058 struct ocfs2_alloc_stats
48059 {
48060- atomic_t moves;
48061- atomic_t local_data;
48062- atomic_t bitmap_data;
48063- atomic_t bg_allocs;
48064- atomic_t bg_extends;
48065+ atomic_unchecked_t moves;
48066+ atomic_unchecked_t local_data;
48067+ atomic_unchecked_t bitmap_data;
48068+ atomic_unchecked_t bg_allocs;
48069+ atomic_unchecked_t bg_extends;
48070 };
48071
48072 enum ocfs2_local_alloc_state
48073diff -urNp linux-2.6.32.48/fs/ocfs2/suballoc.c linux-2.6.32.48/fs/ocfs2/suballoc.c
48074--- linux-2.6.32.48/fs/ocfs2/suballoc.c 2011-11-08 19:02:43.000000000 -0500
48075+++ linux-2.6.32.48/fs/ocfs2/suballoc.c 2011-11-15 19:59:43.000000000 -0500
48076@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
48077 mlog_errno(status);
48078 goto bail;
48079 }
48080- atomic_inc(&osb->alloc_stats.bg_extends);
48081+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48082
48083 /* You should never ask for this much metadata */
48084 BUG_ON(bits_wanted >
48085@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
48086 mlog_errno(status);
48087 goto bail;
48088 }
48089- atomic_inc(&osb->alloc_stats.bg_allocs);
48090+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
48091
48092 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
48093 ac->ac_bits_given += (*num_bits);
48094@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
48095 mlog_errno(status);
48096 goto bail;
48097 }
48098- atomic_inc(&osb->alloc_stats.bg_allocs);
48099+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
48100
48101 BUG_ON(num_bits != 1);
48102
48103@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
48104 cluster_start,
48105 num_clusters);
48106 if (!status)
48107- atomic_inc(&osb->alloc_stats.local_data);
48108+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
48109 } else {
48110 if (min_clusters > (osb->bitmap_cpg - 1)) {
48111 /* The only paths asking for contiguousness
48112@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
48113 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
48114 bg_blkno,
48115 bg_bit_off);
48116- atomic_inc(&osb->alloc_stats.bitmap_data);
48117+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
48118 }
48119 }
48120 if (status < 0) {
48121diff -urNp linux-2.6.32.48/fs/ocfs2/super.c linux-2.6.32.48/fs/ocfs2/super.c
48122--- linux-2.6.32.48/fs/ocfs2/super.c 2011-11-08 19:02:43.000000000 -0500
48123+++ linux-2.6.32.48/fs/ocfs2/super.c 2011-11-15 19:59:43.000000000 -0500
48124@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
48125 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48126 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48127 "Stats",
48128- atomic_read(&osb->alloc_stats.bitmap_data),
48129- atomic_read(&osb->alloc_stats.local_data),
48130- atomic_read(&osb->alloc_stats.bg_allocs),
48131- atomic_read(&osb->alloc_stats.moves),
48132- atomic_read(&osb->alloc_stats.bg_extends));
48133+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48134+ atomic_read_unchecked(&osb->alloc_stats.local_data),
48135+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48136+ atomic_read_unchecked(&osb->alloc_stats.moves),
48137+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48138
48139 out += snprintf(buf + out, len - out,
48140 "%10s => State: %u Descriptor: %llu Size: %u bits "
48141@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
48142 spin_lock_init(&osb->osb_xattr_lock);
48143 ocfs2_init_inode_steal_slot(osb);
48144
48145- atomic_set(&osb->alloc_stats.moves, 0);
48146- atomic_set(&osb->alloc_stats.local_data, 0);
48147- atomic_set(&osb->alloc_stats.bitmap_data, 0);
48148- atomic_set(&osb->alloc_stats.bg_allocs, 0);
48149- atomic_set(&osb->alloc_stats.bg_extends, 0);
48150+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48151+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48152+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48153+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48154+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48155
48156 /* Copy the blockcheck stats from the superblock probe */
48157 osb->osb_ecc_stats = *stats;
48158diff -urNp linux-2.6.32.48/fs/open.c linux-2.6.32.48/fs/open.c
48159--- linux-2.6.32.48/fs/open.c 2011-11-08 19:02:43.000000000 -0500
48160+++ linux-2.6.32.48/fs/open.c 2011-11-15 19:59:43.000000000 -0500
48161@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
48162 error = locks_verify_truncate(inode, NULL, length);
48163 if (!error)
48164 error = security_path_truncate(&path, length, 0);
48165+
48166+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48167+ error = -EACCES;
48168+
48169 if (!error) {
48170 vfs_dq_init(inode);
48171 error = do_truncate(path.dentry, length, 0, NULL);
48172@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
48173 if (__mnt_is_readonly(path.mnt))
48174 res = -EROFS;
48175
48176+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48177+ res = -EACCES;
48178+
48179 out_path_release:
48180 path_put(&path);
48181 out:
48182@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
48183 if (error)
48184 goto dput_and_out;
48185
48186+ gr_log_chdir(path.dentry, path.mnt);
48187+
48188 set_fs_pwd(current->fs, &path);
48189
48190 dput_and_out:
48191@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
48192 goto out_putf;
48193
48194 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
48195+
48196+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48197+ error = -EPERM;
48198+
48199+ if (!error)
48200+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48201+
48202 if (!error)
48203 set_fs_pwd(current->fs, &file->f_path);
48204 out_putf:
48205@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __use
48206 if (!capable(CAP_SYS_CHROOT))
48207 goto dput_and_out;
48208
48209+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48210+ goto dput_and_out;
48211+
48212 set_fs_root(current->fs, &path);
48213+
48214+ gr_handle_chroot_chdir(&path);
48215+
48216 error = 0;
48217 dput_and_out:
48218 path_put(&path);
48219@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
48220 err = mnt_want_write_file(file);
48221 if (err)
48222 goto out_putf;
48223+
48224 mutex_lock(&inode->i_mutex);
48225+
48226+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
48227+ err = -EACCES;
48228+ goto out_unlock;
48229+ }
48230+
48231 if (mode == (mode_t) -1)
48232 mode = inode->i_mode;
48233+
48234+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
48235+ err = -EPERM;
48236+ goto out_unlock;
48237+ }
48238+
48239 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
48240 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
48241 err = notify_change(dentry, &newattrs);
48242+
48243+out_unlock:
48244 mutex_unlock(&inode->i_mutex);
48245 mnt_drop_write(file->f_path.mnt);
48246 out_putf:
48247@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
48248 error = mnt_want_write(path.mnt);
48249 if (error)
48250 goto dput_and_out;
48251+
48252 mutex_lock(&inode->i_mutex);
48253+
48254+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
48255+ error = -EACCES;
48256+ goto out_unlock;
48257+ }
48258+
48259 if (mode == (mode_t) -1)
48260 mode = inode->i_mode;
48261+
48262+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
48263+ error = -EACCES;
48264+ goto out_unlock;
48265+ }
48266+
48267 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
48268 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
48269 error = notify_change(path.dentry, &newattrs);
48270+
48271+out_unlock:
48272 mutex_unlock(&inode->i_mutex);
48273 mnt_drop_write(path.mnt);
48274 dput_and_out:
48275@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user
48276 return sys_fchmodat(AT_FDCWD, filename, mode);
48277 }
48278
48279-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
48280+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
48281 {
48282 struct inode *inode = dentry->d_inode;
48283 int error;
48284 struct iattr newattrs;
48285
48286+ if (!gr_acl_handle_chown(dentry, mnt))
48287+ return -EACCES;
48288+
48289 newattrs.ia_valid = ATTR_CTIME;
48290 if (user != (uid_t) -1) {
48291 newattrs.ia_valid |= ATTR_UID;
48292@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user
48293 error = mnt_want_write(path.mnt);
48294 if (error)
48295 goto out_release;
48296- error = chown_common(path.dentry, user, group);
48297+ error = chown_common(path.dentry, user, group, path.mnt);
48298 mnt_drop_write(path.mnt);
48299 out_release:
48300 path_put(&path);
48301@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
48302 error = mnt_want_write(path.mnt);
48303 if (error)
48304 goto out_release;
48305- error = chown_common(path.dentry, user, group);
48306+ error = chown_common(path.dentry, user, group, path.mnt);
48307 mnt_drop_write(path.mnt);
48308 out_release:
48309 path_put(&path);
48310@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __use
48311 error = mnt_want_write(path.mnt);
48312 if (error)
48313 goto out_release;
48314- error = chown_common(path.dentry, user, group);
48315+ error = chown_common(path.dentry, user, group, path.mnt);
48316 mnt_drop_write(path.mnt);
48317 out_release:
48318 path_put(&path);
48319@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
48320 goto out_fput;
48321 dentry = file->f_path.dentry;
48322 audit_inode(NULL, dentry);
48323- error = chown_common(dentry, user, group);
48324+ error = chown_common(dentry, user, group, file->f_path.mnt);
48325 mnt_drop_write(file->f_path.mnt);
48326 out_fput:
48327 fput(file);
48328@@ -1036,7 +1091,10 @@ long do_sys_open(int dfd, const char __u
48329 if (!IS_ERR(tmp)) {
48330 fd = get_unused_fd_flags(flags);
48331 if (fd >= 0) {
48332- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
48333+ struct file *f;
48334+ /* don't allow to be set by userland */
48335+ flags &= ~FMODE_GREXEC;
48336+ f = do_filp_open(dfd, tmp, flags, mode, 0);
48337 if (IS_ERR(f)) {
48338 put_unused_fd(fd);
48339 fd = PTR_ERR(f);
48340diff -urNp linux-2.6.32.48/fs/partitions/ldm.c linux-2.6.32.48/fs/partitions/ldm.c
48341--- linux-2.6.32.48/fs/partitions/ldm.c 2011-11-08 19:02:43.000000000 -0500
48342+++ linux-2.6.32.48/fs/partitions/ldm.c 2011-11-15 19:59:43.000000000 -0500
48343@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
48344 ldm_error ("A VBLK claims to have %d parts.", num);
48345 return false;
48346 }
48347+
48348 if (rec >= num) {
48349 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
48350 return false;
48351@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
48352 goto found;
48353 }
48354
48355- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
48356+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
48357 if (!f) {
48358 ldm_crit ("Out of memory.");
48359 return false;
48360diff -urNp linux-2.6.32.48/fs/partitions/mac.c linux-2.6.32.48/fs/partitions/mac.c
48361--- linux-2.6.32.48/fs/partitions/mac.c 2011-11-08 19:02:43.000000000 -0500
48362+++ linux-2.6.32.48/fs/partitions/mac.c 2011-11-15 19:59:43.000000000 -0500
48363@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
48364 return 0; /* not a MacOS disk */
48365 }
48366 blocks_in_map = be32_to_cpu(part->map_count);
48367+ printk(" [mac]");
48368 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
48369 put_dev_sector(sect);
48370 return 0;
48371 }
48372- printk(" [mac]");
48373 for (slot = 1; slot <= blocks_in_map; ++slot) {
48374 int pos = slot * secsize;
48375 put_dev_sector(sect);
48376diff -urNp linux-2.6.32.48/fs/pipe.c linux-2.6.32.48/fs/pipe.c
48377--- linux-2.6.32.48/fs/pipe.c 2011-11-08 19:02:43.000000000 -0500
48378+++ linux-2.6.32.48/fs/pipe.c 2011-11-15 19:59:43.000000000 -0500
48379@@ -401,9 +401,9 @@ redo:
48380 }
48381 if (bufs) /* More to do? */
48382 continue;
48383- if (!pipe->writers)
48384+ if (!atomic_read(&pipe->writers))
48385 break;
48386- if (!pipe->waiting_writers) {
48387+ if (!atomic_read(&pipe->waiting_writers)) {
48388 /* syscall merging: Usually we must not sleep
48389 * if O_NONBLOCK is set, or if we got some data.
48390 * But if a writer sleeps in kernel space, then
48391@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
48392 mutex_lock(&inode->i_mutex);
48393 pipe = inode->i_pipe;
48394
48395- if (!pipe->readers) {
48396+ if (!atomic_read(&pipe->readers)) {
48397 send_sig(SIGPIPE, current, 0);
48398 ret = -EPIPE;
48399 goto out;
48400@@ -511,7 +511,7 @@ redo1:
48401 for (;;) {
48402 int bufs;
48403
48404- if (!pipe->readers) {
48405+ if (!atomic_read(&pipe->readers)) {
48406 send_sig(SIGPIPE, current, 0);
48407 if (!ret)
48408 ret = -EPIPE;
48409@@ -597,9 +597,9 @@ redo2:
48410 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48411 do_wakeup = 0;
48412 }
48413- pipe->waiting_writers++;
48414+ atomic_inc(&pipe->waiting_writers);
48415 pipe_wait(pipe);
48416- pipe->waiting_writers--;
48417+ atomic_dec(&pipe->waiting_writers);
48418 }
48419 out:
48420 mutex_unlock(&inode->i_mutex);
48421@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
48422 mask = 0;
48423 if (filp->f_mode & FMODE_READ) {
48424 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48425- if (!pipe->writers && filp->f_version != pipe->w_counter)
48426+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48427 mask |= POLLHUP;
48428 }
48429
48430@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
48431 * Most Unices do not set POLLERR for FIFOs but on Linux they
48432 * behave exactly like pipes for poll().
48433 */
48434- if (!pipe->readers)
48435+ if (!atomic_read(&pipe->readers))
48436 mask |= POLLERR;
48437 }
48438
48439@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
48440
48441 mutex_lock(&inode->i_mutex);
48442 pipe = inode->i_pipe;
48443- pipe->readers -= decr;
48444- pipe->writers -= decw;
48445+ atomic_sub(decr, &pipe->readers);
48446+ atomic_sub(decw, &pipe->writers);
48447
48448- if (!pipe->readers && !pipe->writers) {
48449+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48450 free_pipe_info(inode);
48451 } else {
48452 wake_up_interruptible_sync(&pipe->wait);
48453@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
48454
48455 if (inode->i_pipe) {
48456 ret = 0;
48457- inode->i_pipe->readers++;
48458+ atomic_inc(&inode->i_pipe->readers);
48459 }
48460
48461 mutex_unlock(&inode->i_mutex);
48462@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
48463
48464 if (inode->i_pipe) {
48465 ret = 0;
48466- inode->i_pipe->writers++;
48467+ atomic_inc(&inode->i_pipe->writers);
48468 }
48469
48470 mutex_unlock(&inode->i_mutex);
48471@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
48472 if (inode->i_pipe) {
48473 ret = 0;
48474 if (filp->f_mode & FMODE_READ)
48475- inode->i_pipe->readers++;
48476+ atomic_inc(&inode->i_pipe->readers);
48477 if (filp->f_mode & FMODE_WRITE)
48478- inode->i_pipe->writers++;
48479+ atomic_inc(&inode->i_pipe->writers);
48480 }
48481
48482 mutex_unlock(&inode->i_mutex);
48483@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
48484 inode->i_pipe = NULL;
48485 }
48486
48487-static struct vfsmount *pipe_mnt __read_mostly;
48488+struct vfsmount *pipe_mnt __read_mostly;
48489 static int pipefs_delete_dentry(struct dentry *dentry)
48490 {
48491 /*
48492@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
48493 goto fail_iput;
48494 inode->i_pipe = pipe;
48495
48496- pipe->readers = pipe->writers = 1;
48497+ atomic_set(&pipe->readers, 1);
48498+ atomic_set(&pipe->writers, 1);
48499 inode->i_fop = &rdwr_pipefifo_fops;
48500
48501 /*
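The fs/pipe.c hunks above convert the readers, writers and waiting_writers counts from plain integers to atomic counters and switch every access to atomic_read()/atomic_inc()/atomic_sub(). Below is a small userspace sketch of the same counting pattern using C11 atomics in place of the kernel's atomic_t; the struct and the open/release/poll labels are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative counters mirroring the patched struct pipe_inode_info. */
struct pipe_sketch {
	atomic_int readers;
	atomic_int writers;
	atomic_int waiting_writers;
};

int main(void)
{
	struct pipe_sketch p;

	/* get_pipe_inode(): both ends start at 1 */
	atomic_init(&p.readers, 1);
	atomic_init(&p.writers, 1);
	atomic_init(&p.waiting_writers, 0);

	atomic_fetch_add(&p.readers, 1);        /* pipe_read_open()  */
	atomic_fetch_sub(&p.readers, 1);        /* pipe_release()    */

	if (!atomic_load(&p.writers))           /* pipe_poll() check */
		puts("no writers");

	printf("readers=%d writers=%d\n",
	       atomic_load(&p.readers), atomic_load(&p.writers));
	return 0;
}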
48502diff -urNp linux-2.6.32.48/fs/proc/array.c linux-2.6.32.48/fs/proc/array.c
48503--- linux-2.6.32.48/fs/proc/array.c 2011-11-08 19:02:43.000000000 -0500
48504+++ linux-2.6.32.48/fs/proc/array.c 2011-11-15 19:59:43.000000000 -0500
48505@@ -60,6 +60,7 @@
48506 #include <linux/tty.h>
48507 #include <linux/string.h>
48508 #include <linux/mman.h>
48509+#include <linux/grsecurity.h>
48510 #include <linux/proc_fs.h>
48511 #include <linux/ioport.h>
48512 #include <linux/uaccess.h>
48513@@ -321,6 +322,21 @@ static inline void task_context_switch_c
48514 p->nivcsw);
48515 }
48516
48517+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48518+static inline void task_pax(struct seq_file *m, struct task_struct *p)
48519+{
48520+ if (p->mm)
48521+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48522+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48523+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48524+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48525+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48526+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48527+ else
48528+ seq_printf(m, "PaX:\t-----\n");
48529+}
48530+#endif
48531+
48532 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48533 struct pid *pid, struct task_struct *task)
48534 {
48535@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
48536 task_cap(m, task);
48537 cpuset_task_status_allowed(m, task);
48538 task_context_switch_counts(m, task);
48539+
48540+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48541+ task_pax(m, task);
48542+#endif
48543+
48544+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48545+ task_grsec_rbac(m, task);
48546+#endif
48547+
48548 return 0;
48549 }
48550
48551+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48552+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48553+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48554+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48555+#endif
48556+
48557 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48558 struct pid *pid, struct task_struct *task, int whole)
48559 {
48560@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
48561 cputime_t cutime, cstime, utime, stime;
48562 cputime_t cgtime, gtime;
48563 unsigned long rsslim = 0;
48564- char tcomm[sizeof(task->comm)];
48565+ char tcomm[sizeof(task->comm)] = { 0 };
48566 unsigned long flags;
48567
48568+ pax_track_stack();
48569+
48570 state = *get_task_state(task);
48571 vsize = eip = esp = 0;
48572 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
48573@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
48574 gtime = task_gtime(task);
48575 }
48576
48577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48578+ if (PAX_RAND_FLAGS(mm)) {
48579+ eip = 0;
48580+ esp = 0;
48581+ wchan = 0;
48582+ }
48583+#endif
48584+#ifdef CONFIG_GRKERNSEC_HIDESYM
48585+ wchan = 0;
48586+ eip =0;
48587+ esp =0;
48588+#endif
48589+
48590 /* scale priority and nice values from timeslices to -20..20 */
48591 /* to make it look like a "normal" Unix priority/nice value */
48592 priority = task_prio(task);
48593@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
48594 vsize,
48595 mm ? get_mm_rss(mm) : 0,
48596 rsslim,
48597+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48598+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48599+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
48600+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48601+#else
48602 mm ? (permitted ? mm->start_code : 1) : 0,
48603 mm ? (permitted ? mm->end_code : 1) : 0,
48604 (permitted && mm) ? mm->start_stack : 0,
48605+#endif
48606 esp,
48607 eip,
48608 /* The signal information here is obsolete.
48609@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
48610
48611 return 0;
48612 }
48613+
48614+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48615+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48616+{
48617+ u32 curr_ip = 0;
48618+ unsigned long flags;
48619+
48620+ if (lock_task_sighand(task, &flags)) {
48621+ curr_ip = task->signal->curr_ip;
48622+ unlock_task_sighand(task, &flags);
48623+ }
48624+
48625+ return sprintf(buffer, "%pI4\n", &curr_ip);
48626+}
48627+#endif
48628diff -urNp linux-2.6.32.48/fs/proc/base.c linux-2.6.32.48/fs/proc/base.c
48629--- linux-2.6.32.48/fs/proc/base.c 2011-11-08 19:02:43.000000000 -0500
48630+++ linux-2.6.32.48/fs/proc/base.c 2011-11-15 19:59:43.000000000 -0500
48631@@ -102,6 +102,22 @@ struct pid_entry {
48632 union proc_op op;
48633 };
48634
48635+struct getdents_callback {
48636+ struct linux_dirent __user * current_dir;
48637+ struct linux_dirent __user * previous;
48638+ struct file * file;
48639+ int count;
48640+ int error;
48641+};
48642+
48643+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
48644+ loff_t offset, u64 ino, unsigned int d_type)
48645+{
48646+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
48647+ buf->error = -EINVAL;
48648+ return 0;
48649+}
48650+
48651 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48652 .name = (NAME), \
48653 .len = sizeof(NAME) - 1, \
48654@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
48655 if (task == current)
48656 return 0;
48657
48658+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
48659+ return -EPERM;
48660+
48661 /*
48662 * If current is actively ptrace'ing, and would also be
48663 * permitted to freshly attach with ptrace now, permit it.
48664@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
48665 if (!mm->arg_end)
48666 goto out_mm; /* Shh! No looking before we're done */
48667
48668+ if (gr_acl_handle_procpidmem(task))
48669+ goto out_mm;
48670+
48671 len = mm->arg_end - mm->arg_start;
48672
48673 if (len > PAGE_SIZE)
48674@@ -287,12 +309,28 @@ out:
48675 return res;
48676 }
48677
48678+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48679+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48680+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48681+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48682+#endif
48683+
48684 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48685 {
48686 int res = 0;
48687 struct mm_struct *mm = get_task_mm(task);
48688 if (mm) {
48689 unsigned int nwords = 0;
48690+
48691+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48692+ /* allow if we're currently ptracing this task */
48693+ if (PAX_RAND_FLAGS(mm) &&
48694+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48695+ mmput(mm);
48696+ return 0;
48697+ }
48698+#endif
48699+
48700 do {
48701 nwords += 2;
48702 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48703@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
48704 }
48705
48706
48707-#ifdef CONFIG_KALLSYMS
48708+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48709 /*
48710 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48711 * Returns the resolved symbol. If that fails, simply return the address.
48712@@ -345,7 +383,7 @@ static void unlock_trace(struct task_str
48713 mutex_unlock(&task->cred_guard_mutex);
48714 }
48715
48716-#ifdef CONFIG_STACKTRACE
48717+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48718
48719 #define MAX_STACK_TRACE_DEPTH 64
48720
48721@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_s
48722 return count;
48723 }
48724
48725-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48726+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48727 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48728 {
48729 long nr;
48730@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_
48731 /************************************************************************/
48732
48733 /* permission checks */
48734-static int proc_fd_access_allowed(struct inode *inode)
48735+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48736 {
48737 struct task_struct *task;
48738 int allowed = 0;
48739@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct
48740 */
48741 task = get_proc_task(inode);
48742 if (task) {
48743- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48744+ if (log)
48745+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
48746+ else
48747+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48748 put_task_struct(task);
48749 }
48750 return allowed;
48751@@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file
48752 if (!task)
48753 goto out_no_task;
48754
48755+ if (gr_acl_handle_procpidmem(task))
48756+ goto out;
48757+
48758 if (!ptrace_may_access(task, PTRACE_MODE_READ))
48759 goto out;
48760
48761@@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct
48762 path_put(&nd->path);
48763
48764 /* Are we allowed to snoop on the tasks file descriptors? */
48765- if (!proc_fd_access_allowed(inode))
48766+ if (!proc_fd_access_allowed(inode,0))
48767 goto out;
48768
48769 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
48770@@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dent
48771 struct path path;
48772
48773 /* Are we allowed to snoop on the tasks file descriptors? */
48774- if (!proc_fd_access_allowed(inode))
48775- goto out;
48776+ /* logging this is needed for learning on chromium to work properly,
48777+ but we don't want to flood the logs from 'ps' which does a readlink
48778+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48779+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48780+ */
48781+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48782+ if (!proc_fd_access_allowed(inode,0))
48783+ goto out;
48784+ } else {
48785+ if (!proc_fd_access_allowed(inode,1))
48786+ goto out;
48787+ }
48788
48789 error = PROC_I(inode)->op.proc_get_link(inode, &path);
48790 if (error)
48791@@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode
48792 rcu_read_lock();
48793 cred = __task_cred(task);
48794 inode->i_uid = cred->euid;
48795+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48796+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48797+#else
48798 inode->i_gid = cred->egid;
48799+#endif
48800 rcu_read_unlock();
48801 }
48802 security_task_to_inode(task, inode);
48803@@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *
48804 struct inode *inode = dentry->d_inode;
48805 struct task_struct *task;
48806 const struct cred *cred;
48807+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48808+ const struct cred *tmpcred = current_cred();
48809+#endif
48810
48811 generic_fillattr(inode, stat);
48812
48813@@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *
48814 stat->uid = 0;
48815 stat->gid = 0;
48816 task = pid_task(proc_pid(inode), PIDTYPE_PID);
48817+
48818+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
48819+ rcu_read_unlock();
48820+ return -ENOENT;
48821+ }
48822+
48823 if (task) {
48824+ cred = __task_cred(task);
48825+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48826+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48827+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48828+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48829+#endif
48830+ ) {
48831+#endif
48832 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48833+#ifdef CONFIG_GRKERNSEC_PROC_USER
48834+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48835+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48836+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48837+#endif
48838 task_dumpable(task)) {
48839- cred = __task_cred(task);
48840 stat->uid = cred->euid;
48841+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48842+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48843+#else
48844 stat->gid = cred->egid;
48845+#endif
48846 }
48847+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48848+ } else {
48849+ rcu_read_unlock();
48850+ return -ENOENT;
48851+ }
48852+#endif
48853 }
48854 rcu_read_unlock();
48855 return 0;
48856@@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry
48857
48858 if (task) {
48859 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48860+#ifdef CONFIG_GRKERNSEC_PROC_USER
48861+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48862+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48863+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48864+#endif
48865 task_dumpable(task)) {
48866 rcu_read_lock();
48867 cred = __task_cred(task);
48868 inode->i_uid = cred->euid;
48869+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48870+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48871+#else
48872 inode->i_gid = cred->egid;
48873+#endif
48874 rcu_read_unlock();
48875 } else {
48876 inode->i_uid = 0;
48877@@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *in
48878 int fd = proc_fd(inode);
48879
48880 if (task) {
48881- files = get_files_struct(task);
48882+ if (!gr_acl_handle_procpidmem(task))
48883+ files = get_files_struct(task);
48884 put_task_struct(task);
48885 }
48886 if (files) {
48887@@ -1922,12 +2021,22 @@ static const struct file_operations proc
48888 static int proc_fd_permission(struct inode *inode, int mask)
48889 {
48890 int rv;
48891+ struct task_struct *task;
48892
48893 rv = generic_permission(inode, mask, NULL);
48894- if (rv == 0)
48895- return 0;
48896+
48897 if (task_pid(current) == proc_pid(inode))
48898 rv = 0;
48899+
48900+ task = get_proc_task(inode);
48901+ if (task == NULL)
48902+ return rv;
48903+
48904+ if (gr_acl_handle_procpidmem(task))
48905+ rv = -EACCES;
48906+
48907+ put_task_struct(task);
48908+
48909 return rv;
48910 }
48911
48912@@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup
48913 if (!task)
48914 goto out_no_task;
48915
48916+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48917+ goto out;
48918+
48919 /*
48920 * Yes, it does not scale. And it should not. Don't add
48921 * new entries into /proc/<tgid>/ without very good reasons.
48922@@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct fi
48923 if (!task)
48924 goto out_no_task;
48925
48926+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48927+ goto out;
48928+
48929 ret = 0;
48930 i = filp->f_pos;
48931 switch (i) {
48932@@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struc
48933 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48934 void *cookie)
48935 {
48936- char *s = nd_get_link(nd);
48937+ const char *s = nd_get_link(nd);
48938 if (!IS_ERR(s))
48939 __putname(s);
48940 }
48941@@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_
48942 #ifdef CONFIG_SCHED_DEBUG
48943 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48944 #endif
48945-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48946+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48947 INF("syscall", S_IRUGO, proc_pid_syscall),
48948 #endif
48949 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48950@@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_
48951 #ifdef CONFIG_SECURITY
48952 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48953 #endif
48954-#ifdef CONFIG_KALLSYMS
48955+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48956 INF("wchan", S_IRUGO, proc_pid_wchan),
48957 #endif
48958-#ifdef CONFIG_STACKTRACE
48959+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48960 ONE("stack", S_IRUGO, proc_pid_stack),
48961 #endif
48962 #ifdef CONFIG_SCHEDSTATS
48963@@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_
48964 #ifdef CONFIG_TASK_IO_ACCOUNTING
48965 INF("io", S_IRUSR, proc_tgid_io_accounting),
48966 #endif
48967+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48968+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48969+#endif
48970 };
48971
48972 static int proc_tgid_base_readdir(struct file * filp,
48973@@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantia
48974 if (!inode)
48975 goto out;
48976
48977+#ifdef CONFIG_GRKERNSEC_PROC_USER
48978+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48979+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48980+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48981+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48982+#else
48983 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48984+#endif
48985 inode->i_op = &proc_tgid_base_inode_operations;
48986 inode->i_fop = &proc_tgid_base_operations;
48987 inode->i_flags|=S_IMMUTABLE;
48988@@ -2777,7 +2902,14 @@ struct dentry *proc_pid_lookup(struct in
48989 if (!task)
48990 goto out;
48991
48992+ if (!has_group_leader_pid(task))
48993+ goto out_put_task;
48994+
48995+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48996+ goto out_put_task;
48997+
48998 result = proc_pid_instantiate(dir, dentry, task, NULL);
48999+out_put_task:
49000 put_task_struct(task);
49001 out:
49002 return result;
49003@@ -2842,6 +2974,11 @@ int proc_pid_readdir(struct file * filp,
49004 {
49005 unsigned int nr;
49006 struct task_struct *reaper;
49007+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49008+ const struct cred *tmpcred = current_cred();
49009+ const struct cred *itercred;
49010+#endif
49011+ filldir_t __filldir = filldir;
49012 struct tgid_iter iter;
49013 struct pid_namespace *ns;
49014
49015@@ -2865,8 +3002,27 @@ int proc_pid_readdir(struct file * filp,
49016 for (iter = next_tgid(ns, iter);
49017 iter.task;
49018 iter.tgid += 1, iter = next_tgid(ns, iter)) {
49019+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49020+ rcu_read_lock();
49021+ itercred = __task_cred(iter.task);
49022+#endif
49023+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
49024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49025+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
49026+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49027+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
49028+#endif
49029+ )
49030+#endif
49031+ )
49032+ __filldir = &gr_fake_filldir;
49033+ else
49034+ __filldir = filldir;
49035+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49036+ rcu_read_unlock();
49037+#endif
49038 filp->f_pos = iter.tgid + TGID_OFFSET;
49039- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
49040+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
49041 put_task_struct(iter.task);
49042 goto out;
49043 }
49044@@ -2892,7 +3048,7 @@ static const struct pid_entry tid_base_s
49045 #ifdef CONFIG_SCHED_DEBUG
49046 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49047 #endif
49048-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49049+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49050 INF("syscall", S_IRUGO, proc_pid_syscall),
49051 #endif
49052 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49053@@ -2916,10 +3072,10 @@ static const struct pid_entry tid_base_s
49054 #ifdef CONFIG_SECURITY
49055 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49056 #endif
49057-#ifdef CONFIG_KALLSYMS
49058+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49059 INF("wchan", S_IRUGO, proc_pid_wchan),
49060 #endif
49061-#ifdef CONFIG_STACKTRACE
49062+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49063 ONE("stack", S_IRUGO, proc_pid_stack),
49064 #endif
49065 #ifdef CONFIG_SCHEDSTATS
49066diff -urNp linux-2.6.32.48/fs/proc/cmdline.c linux-2.6.32.48/fs/proc/cmdline.c
49067--- linux-2.6.32.48/fs/proc/cmdline.c 2011-11-08 19:02:43.000000000 -0500
49068+++ linux-2.6.32.48/fs/proc/cmdline.c 2011-11-15 19:59:43.000000000 -0500
49069@@ -23,7 +23,11 @@ static const struct file_operations cmdl
49070
49071 static int __init proc_cmdline_init(void)
49072 {
49073+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49074+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49075+#else
49076 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49077+#endif
49078 return 0;
49079 }
49080 module_init(proc_cmdline_init);
49081diff -urNp linux-2.6.32.48/fs/proc/devices.c linux-2.6.32.48/fs/proc/devices.c
49082--- linux-2.6.32.48/fs/proc/devices.c 2011-11-08 19:02:43.000000000 -0500
49083+++ linux-2.6.32.48/fs/proc/devices.c 2011-11-15 19:59:43.000000000 -0500
49084@@ -64,7 +64,11 @@ static const struct file_operations proc
49085
49086 static int __init proc_devices_init(void)
49087 {
49088+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49089+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49090+#else
49091 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49092+#endif
49093 return 0;
49094 }
49095 module_init(proc_devices_init);
49096diff -urNp linux-2.6.32.48/fs/proc/inode.c linux-2.6.32.48/fs/proc/inode.c
49097--- linux-2.6.32.48/fs/proc/inode.c 2011-11-08 19:02:43.000000000 -0500
49098+++ linux-2.6.32.48/fs/proc/inode.c 2011-11-15 19:59:43.000000000 -0500
49099@@ -18,12 +18,19 @@
49100 #include <linux/module.h>
49101 #include <linux/smp_lock.h>
49102 #include <linux/sysctl.h>
49103+#include <linux/grsecurity.h>
49104
49105 #include <asm/system.h>
49106 #include <asm/uaccess.h>
49107
49108 #include "internal.h"
49109
49110+#ifdef CONFIG_PROC_SYSCTL
49111+extern const struct inode_operations proc_sys_inode_operations;
49112+extern const struct inode_operations proc_sys_dir_operations;
49113+#endif
49114+
49115+
49116 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
49117 {
49118 atomic_inc(&de->count);
49119@@ -62,6 +69,13 @@ static void proc_delete_inode(struct ino
49120 de_put(de);
49121 if (PROC_I(inode)->sysctl)
49122 sysctl_head_put(PROC_I(inode)->sysctl);
49123+
49124+#ifdef CONFIG_PROC_SYSCTL
49125+ if (inode->i_op == &proc_sys_inode_operations ||
49126+ inode->i_op == &proc_sys_dir_operations)
49127+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49128+#endif
49129+
49130 clear_inode(inode);
49131 }
49132
49133@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct supe
49134 if (de->mode) {
49135 inode->i_mode = de->mode;
49136 inode->i_uid = de->uid;
49137+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49138+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49139+#else
49140 inode->i_gid = de->gid;
49141+#endif
49142 }
49143 if (de->size)
49144 inode->i_size = de->size;
49145diff -urNp linux-2.6.32.48/fs/proc/internal.h linux-2.6.32.48/fs/proc/internal.h
49146--- linux-2.6.32.48/fs/proc/internal.h 2011-11-08 19:02:43.000000000 -0500
49147+++ linux-2.6.32.48/fs/proc/internal.h 2011-11-15 19:59:43.000000000 -0500
49148@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
49149 struct pid *pid, struct task_struct *task);
49150 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49151 struct pid *pid, struct task_struct *task);
49152+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49153+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49154+#endif
49155 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49156
49157 extern const struct file_operations proc_maps_operations;
49158diff -urNp linux-2.6.32.48/fs/proc/Kconfig linux-2.6.32.48/fs/proc/Kconfig
49159--- linux-2.6.32.48/fs/proc/Kconfig 2011-11-08 19:02:43.000000000 -0500
49160+++ linux-2.6.32.48/fs/proc/Kconfig 2011-11-15 19:59:43.000000000 -0500
49161@@ -30,12 +30,12 @@ config PROC_FS
49162
49163 config PROC_KCORE
49164 bool "/proc/kcore support" if !ARM
49165- depends on PROC_FS && MMU
49166+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
49167
49168 config PROC_VMCORE
49169 bool "/proc/vmcore support (EXPERIMENTAL)"
49170- depends on PROC_FS && CRASH_DUMP
49171- default y
49172+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
49173+ default n
49174 help
49175 Exports the dump image of crashed kernel in ELF format.
49176
49177@@ -59,8 +59,8 @@ config PROC_SYSCTL
49178 limited in memory.
49179
49180 config PROC_PAGE_MONITOR
49181- default y
49182- depends on PROC_FS && MMU
49183+ default n
49184+ depends on PROC_FS && MMU && !GRKERNSEC
49185 bool "Enable /proc page monitoring" if EMBEDDED
49186 help
49187 Various /proc files exist to monitor process memory utilization:
49188diff -urNp linux-2.6.32.48/fs/proc/kcore.c linux-2.6.32.48/fs/proc/kcore.c
49189--- linux-2.6.32.48/fs/proc/kcore.c 2011-11-08 19:02:43.000000000 -0500
49190+++ linux-2.6.32.48/fs/proc/kcore.c 2011-11-15 19:59:43.000000000 -0500
49191@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
49192 off_t offset = 0;
49193 struct kcore_list *m;
49194
49195+ pax_track_stack();
49196+
49197 /* setup ELF header */
49198 elf = (struct elfhdr *) bufp;
49199 bufp += sizeof(struct elfhdr);
49200@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
49201 * the addresses in the elf_phdr on our list.
49202 */
49203 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49204- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49205+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49206+ if (tsz > buflen)
49207 tsz = buflen;
49208-
49209+
49210 while (buflen) {
49211 struct kcore_list *m;
49212
49213@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
49214 kfree(elf_buf);
49215 } else {
49216 if (kern_addr_valid(start)) {
49217- unsigned long n;
49218+ char *elf_buf;
49219+ mm_segment_t oldfs;
49220
49221- n = copy_to_user(buffer, (char *)start, tsz);
49222- /*
49223- * We cannot distingush between fault on source
49224- * and fault on destination. When this happens
49225- * we clear too and hope it will trigger the
49226- * EFAULT again.
49227- */
49228- if (n) {
49229- if (clear_user(buffer + tsz - n,
49230- n))
49231+ elf_buf = kmalloc(tsz, GFP_KERNEL);
49232+ if (!elf_buf)
49233+ return -ENOMEM;
49234+ oldfs = get_fs();
49235+ set_fs(KERNEL_DS);
49236+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49237+ set_fs(oldfs);
49238+ if (copy_to_user(buffer, elf_buf, tsz)) {
49239+ kfree(elf_buf);
49240 return -EFAULT;
49241+ }
49242 }
49243+ set_fs(oldfs);
49244+ kfree(elf_buf);
49245 } else {
49246 if (clear_user(buffer, tsz))
49247 return -EFAULT;
49248@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
49249
49250 static int open_kcore(struct inode *inode, struct file *filp)
49251 {
49252+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49253+ return -EPERM;
49254+#endif
49255 if (!capable(CAP_SYS_RAWIO))
49256 return -EPERM;
49257 if (kcore_need_update)
49258diff -urNp linux-2.6.32.48/fs/proc/meminfo.c linux-2.6.32.48/fs/proc/meminfo.c
49259--- linux-2.6.32.48/fs/proc/meminfo.c 2011-11-08 19:02:43.000000000 -0500
49260+++ linux-2.6.32.48/fs/proc/meminfo.c 2011-11-15 19:59:43.000000000 -0500
49261@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
49262 unsigned long pages[NR_LRU_LISTS];
49263 int lru;
49264
49265+ pax_track_stack();
49266+
49267 /*
49268 * display in kilobytes.
49269 */
49270@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
49271 vmi.used >> 10,
49272 vmi.largest_chunk >> 10
49273 #ifdef CONFIG_MEMORY_FAILURE
49274- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49275+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49276 #endif
49277 );
49278
49279diff -urNp linux-2.6.32.48/fs/proc/nommu.c linux-2.6.32.48/fs/proc/nommu.c
49280--- linux-2.6.32.48/fs/proc/nommu.c 2011-11-08 19:02:43.000000000 -0500
49281+++ linux-2.6.32.48/fs/proc/nommu.c 2011-11-15 19:59:43.000000000 -0500
49282@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
49283 if (len < 1)
49284 len = 1;
49285 seq_printf(m, "%*c", len, ' ');
49286- seq_path(m, &file->f_path, "");
49287+ seq_path(m, &file->f_path, "\n\\");
49288 }
49289
49290 seq_putc(m, '\n');
49291diff -urNp linux-2.6.32.48/fs/proc/proc_net.c linux-2.6.32.48/fs/proc/proc_net.c
49292--- linux-2.6.32.48/fs/proc/proc_net.c 2011-11-08 19:02:43.000000000 -0500
49293+++ linux-2.6.32.48/fs/proc/proc_net.c 2011-11-15 19:59:43.000000000 -0500
49294@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
49295 struct task_struct *task;
49296 struct nsproxy *ns;
49297 struct net *net = NULL;
49298+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49299+ const struct cred *cred = current_cred();
49300+#endif
49301+
49302+#ifdef CONFIG_GRKERNSEC_PROC_USER
49303+ if (cred->fsuid)
49304+ return net;
49305+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49306+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49307+ return net;
49308+#endif
49309
49310 rcu_read_lock();
49311 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49312diff -urNp linux-2.6.32.48/fs/proc/proc_sysctl.c linux-2.6.32.48/fs/proc/proc_sysctl.c
49313--- linux-2.6.32.48/fs/proc/proc_sysctl.c 2011-11-08 19:02:43.000000000 -0500
49314+++ linux-2.6.32.48/fs/proc/proc_sysctl.c 2011-11-15 19:59:43.000000000 -0500
49315@@ -7,11 +7,13 @@
49316 #include <linux/security.h>
49317 #include "internal.h"
49318
49319+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
49320+
49321 static const struct dentry_operations proc_sys_dentry_operations;
49322 static const struct file_operations proc_sys_file_operations;
49323-static const struct inode_operations proc_sys_inode_operations;
49324+const struct inode_operations proc_sys_inode_operations;
49325 static const struct file_operations proc_sys_dir_file_operations;
49326-static const struct inode_operations proc_sys_dir_operations;
49327+const struct inode_operations proc_sys_dir_operations;
49328
49329 static struct inode *proc_sys_make_inode(struct super_block *sb,
49330 struct ctl_table_header *head, struct ctl_table *table)
49331@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
49332 if (!p)
49333 goto out;
49334
49335+ if (gr_handle_sysctl(p, MAY_EXEC))
49336+ goto out;
49337+
49338 err = ERR_PTR(-ENOMEM);
49339 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
49340 if (h)
49341@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(st
49342
49343 err = NULL;
49344 dentry->d_op = &proc_sys_dentry_operations;
49345+
49346+ gr_handle_proc_create(dentry, inode);
49347+
49348 d_add(dentry, inode);
49349
49350 out:
49351@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct fi
49352 return -ENOMEM;
49353 } else {
49354 child->d_op = &proc_sys_dentry_operations;
49355+
49356+ gr_handle_proc_create(child, inode);
49357+
49358 d_add(child, inode);
49359 }
49360 } else {
49361@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header
49362 if (*pos < file->f_pos)
49363 continue;
49364
49365+ if (gr_handle_sysctl(table, 0))
49366+ continue;
49367+
49368 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
49369 if (res)
49370 return res;
49371@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmo
49372 if (IS_ERR(head))
49373 return PTR_ERR(head);
49374
49375+ if (table && gr_handle_sysctl(table, MAY_EXEC))
49376+ return -ENOENT;
49377+
49378 generic_fillattr(inode, stat);
49379 if (table)
49380 stat->mode = (stat->mode & S_IFMT) | table->mode;
49381@@ -362,13 +379,13 @@ static const struct file_operations proc
49382 .llseek = generic_file_llseek,
49383 };
49384
49385-static const struct inode_operations proc_sys_inode_operations = {
49386+const struct inode_operations proc_sys_inode_operations = {
49387 .permission = proc_sys_permission,
49388 .setattr = proc_sys_setattr,
49389 .getattr = proc_sys_getattr,
49390 };
49391
49392-static const struct inode_operations proc_sys_dir_operations = {
49393+const struct inode_operations proc_sys_dir_operations = {
49394 .lookup = proc_sys_lookup,
49395 .permission = proc_sys_permission,
49396 .setattr = proc_sys_setattr,
49397diff -urNp linux-2.6.32.48/fs/proc/root.c linux-2.6.32.48/fs/proc/root.c
49398--- linux-2.6.32.48/fs/proc/root.c 2011-11-08 19:02:43.000000000 -0500
49399+++ linux-2.6.32.48/fs/proc/root.c 2011-11-15 19:59:43.000000000 -0500
49400@@ -134,7 +134,15 @@ void __init proc_root_init(void)
49401 #ifdef CONFIG_PROC_DEVICETREE
49402 proc_device_tree_init();
49403 #endif
49404+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49405+#ifdef CONFIG_GRKERNSEC_PROC_USER
49406+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49407+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49408+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49409+#endif
49410+#else
49411 proc_mkdir("bus", NULL);
49412+#endif
49413 proc_sys_init();
49414 }
49415
49416diff -urNp linux-2.6.32.48/fs/proc/task_mmu.c linux-2.6.32.48/fs/proc/task_mmu.c
49417--- linux-2.6.32.48/fs/proc/task_mmu.c 2011-11-08 19:02:43.000000000 -0500
49418+++ linux-2.6.32.48/fs/proc/task_mmu.c 2011-11-15 19:59:43.000000000 -0500
49419@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
49420 "VmStk:\t%8lu kB\n"
49421 "VmExe:\t%8lu kB\n"
49422 "VmLib:\t%8lu kB\n"
49423- "VmPTE:\t%8lu kB\n",
49424- hiwater_vm << (PAGE_SHIFT-10),
49425+ "VmPTE:\t%8lu kB\n"
49426+
49427+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49428+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49429+#endif
49430+
49431+ ,hiwater_vm << (PAGE_SHIFT-10),
49432 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49433 mm->locked_vm << (PAGE_SHIFT-10),
49434 hiwater_rss << (PAGE_SHIFT-10),
49435 total_rss << (PAGE_SHIFT-10),
49436 data << (PAGE_SHIFT-10),
49437 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49438- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
49439+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
49440+
49441+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49442+ , mm->context.user_cs_base, mm->context.user_cs_limit
49443+#endif
49444+
49445+ );
49446 }
49447
49448 unsigned long task_vsize(struct mm_struct *mm)
49449@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
49450 struct proc_maps_private *priv = m->private;
49451 struct vm_area_struct *vma = v;
49452
49453- vma_stop(priv, vma);
49454+ if (!IS_ERR(vma))
49455+ vma_stop(priv, vma);
49456 if (priv->task)
49457 put_task_struct(priv->task);
49458 }
49459@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
49460 return ret;
49461 }
49462
49463+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49464+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49465+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
49466+ _mm->pax_flags & MF_PAX_SEGMEXEC))
49467+#endif
49468+
49469 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49470 {
49471 struct mm_struct *mm = vma->vm_mm;
49472@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
49473 int flags = vma->vm_flags;
49474 unsigned long ino = 0;
49475 unsigned long long pgoff = 0;
49476- unsigned long start;
49477 dev_t dev = 0;
49478 int len;
49479
49480@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
49481 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49482 }
49483
49484- /* We don't show the stack guard page in /proc/maps */
49485- start = vma->vm_start;
49486- if (vma->vm_flags & VM_GROWSDOWN)
49487- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
49488- start += PAGE_SIZE;
49489-
49490 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49491- start,
49492+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49493+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
49494+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
49495+#else
49496+ vma->vm_start,
49497 vma->vm_end,
49498+#endif
49499 flags & VM_READ ? 'r' : '-',
49500 flags & VM_WRITE ? 'w' : '-',
49501 flags & VM_EXEC ? 'x' : '-',
49502 flags & VM_MAYSHARE ? 's' : 'p',
49503+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49504+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49505+#else
49506 pgoff,
49507+#endif
49508 MAJOR(dev), MINOR(dev), ino, &len);
49509
49510 /*
49511@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
49512 */
49513 if (file) {
49514 pad_len_spaces(m, len);
49515- seq_path(m, &file->f_path, "\n");
49516+ seq_path(m, &file->f_path, "\n\\");
49517 } else {
49518 const char *name = arch_vma_name(vma);
49519 if (!name) {
49520@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
49521 if (vma->vm_start <= mm->brk &&
49522 vma->vm_end >= mm->start_brk) {
49523 name = "[heap]";
49524- } else if (vma->vm_start <= mm->start_stack &&
49525- vma->vm_end >= mm->start_stack) {
49526+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49527+ (vma->vm_start <= mm->start_stack &&
49528+ vma->vm_end >= mm->start_stack)) {
49529 name = "[stack]";
49530 }
49531 } else {
49532@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
49533 };
49534
49535 memset(&mss, 0, sizeof mss);
49536- mss.vma = vma;
49537- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49538- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49539+
49540+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49541+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49542+#endif
49543+ mss.vma = vma;
49544+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49545+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49546+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49547+ }
49548+#endif
49549
49550 show_map_vma(m, vma);
49551
49552@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
49553 "Swap: %8lu kB\n"
49554 "KernelPageSize: %8lu kB\n"
49555 "MMUPageSize: %8lu kB\n",
49556+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49557+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49558+#else
49559 (vma->vm_end - vma->vm_start) >> 10,
49560+#endif
49561 mss.resident >> 10,
49562 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49563 mss.shared_clean >> 10,
49564diff -urNp linux-2.6.32.48/fs/proc/task_nommu.c linux-2.6.32.48/fs/proc/task_nommu.c
49565--- linux-2.6.32.48/fs/proc/task_nommu.c 2011-11-08 19:02:43.000000000 -0500
49566+++ linux-2.6.32.48/fs/proc/task_nommu.c 2011-11-15 19:59:43.000000000 -0500
49567@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
49568 else
49569 bytes += kobjsize(mm);
49570
49571- if (current->fs && current->fs->users > 1)
49572+ if (current->fs && atomic_read(&current->fs->users) > 1)
49573 sbytes += kobjsize(current->fs);
49574 else
49575 bytes += kobjsize(current->fs);
49576@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
49577 if (len < 1)
49578 len = 1;
49579 seq_printf(m, "%*c", len, ' ');
49580- seq_path(m, &file->f_path, "");
49581+ seq_path(m, &file->f_path, "\n\\");
49582 }
49583
49584 seq_putc(m, '\n');
49585diff -urNp linux-2.6.32.48/fs/readdir.c linux-2.6.32.48/fs/readdir.c
49586--- linux-2.6.32.48/fs/readdir.c 2011-11-08 19:02:43.000000000 -0500
49587+++ linux-2.6.32.48/fs/readdir.c 2011-11-15 19:59:43.000000000 -0500
49588@@ -16,6 +16,7 @@
49589 #include <linux/security.h>
49590 #include <linux/syscalls.h>
49591 #include <linux/unistd.h>
49592+#include <linux/namei.h>
49593
49594 #include <asm/uaccess.h>
49595
49596@@ -67,6 +68,7 @@ struct old_linux_dirent {
49597
49598 struct readdir_callback {
49599 struct old_linux_dirent __user * dirent;
49600+ struct file * file;
49601 int result;
49602 };
49603
49604@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
49605 buf->result = -EOVERFLOW;
49606 return -EOVERFLOW;
49607 }
49608+
49609+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49610+ return 0;
49611+
49612 buf->result++;
49613 dirent = buf->dirent;
49614 if (!access_ok(VERIFY_WRITE, dirent,
49615@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
49616
49617 buf.result = 0;
49618 buf.dirent = dirent;
49619+ buf.file = file;
49620
49621 error = vfs_readdir(file, fillonedir, &buf);
49622 if (buf.result)
49623@@ -142,6 +149,7 @@ struct linux_dirent {
49624 struct getdents_callback {
49625 struct linux_dirent __user * current_dir;
49626 struct linux_dirent __user * previous;
49627+ struct file * file;
49628 int count;
49629 int error;
49630 };
49631@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
49632 buf->error = -EOVERFLOW;
49633 return -EOVERFLOW;
49634 }
49635+
49636+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49637+ return 0;
49638+
49639 dirent = buf->previous;
49640 if (dirent) {
49641 if (__put_user(offset, &dirent->d_off))
49642@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
49643 buf.previous = NULL;
49644 buf.count = count;
49645 buf.error = 0;
49646+ buf.file = file;
49647
49648 error = vfs_readdir(file, filldir, &buf);
49649 if (error >= 0)
49650@@ -228,6 +241,7 @@ out:
49651 struct getdents_callback64 {
49652 struct linux_dirent64 __user * current_dir;
49653 struct linux_dirent64 __user * previous;
49654+ struct file *file;
49655 int count;
49656 int error;
49657 };
49658@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
49659 buf->error = -EINVAL; /* only used if we fail.. */
49660 if (reclen > buf->count)
49661 return -EINVAL;
49662+
49663+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49664+ return 0;
49665+
49666 dirent = buf->previous;
49667 if (dirent) {
49668 if (__put_user(offset, &dirent->d_off))
49669@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
49670
49671 buf.current_dir = dirent;
49672 buf.previous = NULL;
49673+ buf.file = file;
49674 buf.count = count;
49675 buf.error = 0;
49676
49677@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
49678 error = buf.error;
49679 lastdirent = buf.previous;
49680 if (lastdirent) {
49681- typeof(lastdirent->d_off) d_off = file->f_pos;
49682+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49683 if (__put_user(d_off, &lastdirent->d_off))
49684 error = -EFAULT;
49685 else
49686diff -urNp linux-2.6.32.48/fs/reiserfs/dir.c linux-2.6.32.48/fs/reiserfs/dir.c
49687--- linux-2.6.32.48/fs/reiserfs/dir.c 2011-11-08 19:02:43.000000000 -0500
49688+++ linux-2.6.32.48/fs/reiserfs/dir.c 2011-11-15 19:59:43.000000000 -0500
49689@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
49690 struct reiserfs_dir_entry de;
49691 int ret = 0;
49692
49693+ pax_track_stack();
49694+
49695 reiserfs_write_lock(inode->i_sb);
49696
49697 reiserfs_check_lock_depth(inode->i_sb, "readdir");
49698diff -urNp linux-2.6.32.48/fs/reiserfs/do_balan.c linux-2.6.32.48/fs/reiserfs/do_balan.c
49699--- linux-2.6.32.48/fs/reiserfs/do_balan.c 2011-11-08 19:02:43.000000000 -0500
49700+++ linux-2.6.32.48/fs/reiserfs/do_balan.c 2011-11-15 19:59:43.000000000 -0500
49701@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
49702 return;
49703 }
49704
49705- atomic_inc(&(fs_generation(tb->tb_sb)));
49706+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49707 do_balance_starts(tb);
49708
49709 /* balance leaf returns 0 except if combining L R and S into
49710diff -urNp linux-2.6.32.48/fs/reiserfs/item_ops.c linux-2.6.32.48/fs/reiserfs/item_ops.c
49711--- linux-2.6.32.48/fs/reiserfs/item_ops.c 2011-11-08 19:02:43.000000000 -0500
49712+++ linux-2.6.32.48/fs/reiserfs/item_ops.c 2011-11-15 19:59:43.000000000 -0500
49713@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
49714 vi->vi_index, vi->vi_type, vi->vi_ih);
49715 }
49716
49717-static struct item_operations stat_data_ops = {
49718+static const struct item_operations stat_data_ops = {
49719 .bytes_number = sd_bytes_number,
49720 .decrement_key = sd_decrement_key,
49721 .is_left_mergeable = sd_is_left_mergeable,
49722@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
49723 vi->vi_index, vi->vi_type, vi->vi_ih);
49724 }
49725
49726-static struct item_operations direct_ops = {
49727+static const struct item_operations direct_ops = {
49728 .bytes_number = direct_bytes_number,
49729 .decrement_key = direct_decrement_key,
49730 .is_left_mergeable = direct_is_left_mergeable,
49731@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
49732 vi->vi_index, vi->vi_type, vi->vi_ih);
49733 }
49734
49735-static struct item_operations indirect_ops = {
49736+static const struct item_operations indirect_ops = {
49737 .bytes_number = indirect_bytes_number,
49738 .decrement_key = indirect_decrement_key,
49739 .is_left_mergeable = indirect_is_left_mergeable,
49740@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
49741 printk("\n");
49742 }
49743
49744-static struct item_operations direntry_ops = {
49745+static const struct item_operations direntry_ops = {
49746 .bytes_number = direntry_bytes_number,
49747 .decrement_key = direntry_decrement_key,
49748 .is_left_mergeable = direntry_is_left_mergeable,
49749@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
49750 "Invalid item type observed, run fsck ASAP");
49751 }
49752
49753-static struct item_operations errcatch_ops = {
49754+static const struct item_operations errcatch_ops = {
49755 errcatch_bytes_number,
49756 errcatch_decrement_key,
49757 errcatch_is_left_mergeable,
49758@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
49759 #error Item types must use disk-format assigned values.
49760 #endif
49761
49762-struct item_operations *item_ops[TYPE_ANY + 1] = {
49763+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
49764 &stat_data_ops,
49765 &indirect_ops,
49766 &direct_ops,
49767diff -urNp linux-2.6.32.48/fs/reiserfs/journal.c linux-2.6.32.48/fs/reiserfs/journal.c
49768--- linux-2.6.32.48/fs/reiserfs/journal.c 2011-11-08 19:02:43.000000000 -0500
49769+++ linux-2.6.32.48/fs/reiserfs/journal.c 2011-11-15 19:59:43.000000000 -0500
49770@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
49771 struct buffer_head *bh;
49772 int i, j;
49773
49774+ pax_track_stack();
49775+
49776 bh = __getblk(dev, block, bufsize);
49777 if (buffer_uptodate(bh))
49778 return (bh);
49779diff -urNp linux-2.6.32.48/fs/reiserfs/namei.c linux-2.6.32.48/fs/reiserfs/namei.c
49780--- linux-2.6.32.48/fs/reiserfs/namei.c 2011-11-08 19:02:43.000000000 -0500
49781+++ linux-2.6.32.48/fs/reiserfs/namei.c 2011-11-15 19:59:43.000000000 -0500
49782@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
49783 unsigned long savelink = 1;
49784 struct timespec ctime;
49785
49786+ pax_track_stack();
49787+
49788 /* three balancings: (1) old name removal, (2) new name insertion
49789 and (3) maybe "save" link insertion
49790 stat data updates: (1) old directory,
49791diff -urNp linux-2.6.32.48/fs/reiserfs/procfs.c linux-2.6.32.48/fs/reiserfs/procfs.c
49792--- linux-2.6.32.48/fs/reiserfs/procfs.c 2011-11-08 19:02:43.000000000 -0500
49793+++ linux-2.6.32.48/fs/reiserfs/procfs.c 2011-11-15 19:59:43.000000000 -0500
49794@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
49795 "SMALL_TAILS " : "NO_TAILS ",
49796 replay_only(sb) ? "REPLAY_ONLY " : "",
49797 convert_reiserfs(sb) ? "CONV " : "",
49798- atomic_read(&r->s_generation_counter),
49799+ atomic_read_unchecked(&r->s_generation_counter),
49800 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49801 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49802 SF(s_good_search_by_key_reada), SF(s_bmaps),
49803@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
49804 struct journal_params *jp = &rs->s_v1.s_journal;
49805 char b[BDEVNAME_SIZE];
49806
49807+ pax_track_stack();
49808+
49809 seq_printf(m, /* on-disk fields */
49810 "jp_journal_1st_block: \t%i\n"
49811 "jp_journal_dev: \t%s[%x]\n"
49812diff -urNp linux-2.6.32.48/fs/reiserfs/stree.c linux-2.6.32.48/fs/reiserfs/stree.c
49813--- linux-2.6.32.48/fs/reiserfs/stree.c 2011-11-08 19:02:43.000000000 -0500
49814+++ linux-2.6.32.48/fs/reiserfs/stree.c 2011-11-15 19:59:43.000000000 -0500
49815@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
49816 int iter = 0;
49817 #endif
49818
49819+ pax_track_stack();
49820+
49821 BUG_ON(!th->t_trans_id);
49822
49823 init_tb_struct(th, &s_del_balance, sb, path,
49824@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
49825 int retval;
49826 int quota_cut_bytes = 0;
49827
49828+ pax_track_stack();
49829+
49830 BUG_ON(!th->t_trans_id);
49831
49832 le_key2cpu_key(&cpu_key, key);
49833@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
49834 int quota_cut_bytes;
49835 loff_t tail_pos = 0;
49836
49837+ pax_track_stack();
49838+
49839 BUG_ON(!th->t_trans_id);
49840
49841 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
49842@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
49843 int retval;
49844 int fs_gen;
49845
49846+ pax_track_stack();
49847+
49848 BUG_ON(!th->t_trans_id);
49849
49850 fs_gen = get_generation(inode->i_sb);
49851@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
49852 int fs_gen = 0;
49853 int quota_bytes = 0;
49854
49855+ pax_track_stack();
49856+
49857 BUG_ON(!th->t_trans_id);
49858
49859 if (inode) { /* Do we count quotas for item? */
49860diff -urNp linux-2.6.32.48/fs/reiserfs/super.c linux-2.6.32.48/fs/reiserfs/super.c
49861--- linux-2.6.32.48/fs/reiserfs/super.c 2011-11-08 19:02:43.000000000 -0500
49862+++ linux-2.6.32.48/fs/reiserfs/super.c 2011-11-15 19:59:43.000000000 -0500
49863@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
49864 {.option_name = NULL}
49865 };
49866
49867+ pax_track_stack();
49868+
49869 *blocks = 0;
49870 if (!options || !*options)
49871 /* use default configuration: create tails, journaling on, no
49872diff -urNp linux-2.6.32.48/fs/select.c linux-2.6.32.48/fs/select.c
49873--- linux-2.6.32.48/fs/select.c 2011-11-08 19:02:43.000000000 -0500
49874+++ linux-2.6.32.48/fs/select.c 2011-11-15 19:59:43.000000000 -0500
49875@@ -20,6 +20,7 @@
49876 #include <linux/module.h>
49877 #include <linux/slab.h>
49878 #include <linux/poll.h>
49879+#include <linux/security.h>
49880 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49881 #include <linux/file.h>
49882 #include <linux/fdtable.h>
49883@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
49884 int retval, i, timed_out = 0;
49885 unsigned long slack = 0;
49886
49887+ pax_track_stack();
49888+
49889 rcu_read_lock();
49890 retval = max_select_fd(n, fds);
49891 rcu_read_unlock();
49892@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
49893 /* Allocate small arguments on the stack to save memory and be faster */
49894 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49895
49896+ pax_track_stack();
49897+
49898 ret = -EINVAL;
49899 if (n < 0)
49900 goto out_nofds;
49901@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
49902 struct poll_list *walk = head;
49903 unsigned long todo = nfds;
49904
49905+ pax_track_stack();
49906+
49907+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49908 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49909 return -EINVAL;
49910
49911diff -urNp linux-2.6.32.48/fs/seq_file.c linux-2.6.32.48/fs/seq_file.c
49912--- linux-2.6.32.48/fs/seq_file.c 2011-11-08 19:02:43.000000000 -0500
49913+++ linux-2.6.32.48/fs/seq_file.c 2011-11-15 19:59:43.000000000 -0500
49914@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
49915 return 0;
49916 }
49917 if (!m->buf) {
49918- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49919+ m->size = PAGE_SIZE;
49920+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49921 if (!m->buf)
49922 return -ENOMEM;
49923 }
49924@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
49925 Eoverflow:
49926 m->op->stop(m, p);
49927 kfree(m->buf);
49928- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49929+ m->size <<= 1;
49930+ m->buf = kmalloc(m->size, GFP_KERNEL);
49931 return !m->buf ? -ENOMEM : -EAGAIN;
49932 }
49933
49934@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
49935 m->version = file->f_version;
49936 /* grab buffer if we didn't have one */
49937 if (!m->buf) {
49938- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49939+ m->size = PAGE_SIZE;
49940+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49941 if (!m->buf)
49942 goto Enomem;
49943 }
49944@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
49945 goto Fill;
49946 m->op->stop(m, p);
49947 kfree(m->buf);
49948- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49949+ m->size <<= 1;
49950+ m->buf = kmalloc(m->size, GFP_KERNEL);
49951 if (!m->buf)
49952 goto Enomem;
49953 m->count = 0;
49954@@ -551,7 +555,7 @@ static void single_stop(struct seq_file
49955 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49956 void *data)
49957 {
49958- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49959+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49960 int res = -ENOMEM;
49961
49962 if (op) {
49963diff -urNp linux-2.6.32.48/fs/smbfs/proc.c linux-2.6.32.48/fs/smbfs/proc.c
49964--- linux-2.6.32.48/fs/smbfs/proc.c 2011-11-08 19:02:43.000000000 -0500
49965+++ linux-2.6.32.48/fs/smbfs/proc.c 2011-11-15 19:59:43.000000000 -0500
49966@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
49967
49968 out:
49969 if (server->local_nls != NULL && server->remote_nls != NULL)
49970- server->ops->convert = convert_cp;
49971+ *(void **)&server->ops->convert = convert_cp;
49972 else
49973- server->ops->convert = convert_memcpy;
49974+ *(void **)&server->ops->convert = convert_memcpy;
49975
49976 smb_unlock_server(server);
49977 return n;
49978@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
49979
49980 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
49981 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
49982- server->ops->getattr = smb_proc_getattr_core;
49983+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
49984 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
49985- server->ops->getattr = smb_proc_getattr_ff;
49986+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
49987 }
49988
49989 /* Decode server capabilities */
49990@@ -3439,7 +3439,7 @@ out:
49991 static void
49992 install_ops(struct smb_ops *dst, struct smb_ops *src)
49993 {
49994- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
49995+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
49996 }
49997
49998 /* < LANMAN2 */
49999diff -urNp linux-2.6.32.48/fs/smbfs/symlink.c linux-2.6.32.48/fs/smbfs/symlink.c
50000--- linux-2.6.32.48/fs/smbfs/symlink.c 2011-11-08 19:02:43.000000000 -0500
50001+++ linux-2.6.32.48/fs/smbfs/symlink.c 2011-11-15 19:59:43.000000000 -0500
50002@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
50003
50004 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50005 {
50006- char *s = nd_get_link(nd);
50007+ const char *s = nd_get_link(nd);
50008 if (!IS_ERR(s))
50009 __putname(s);
50010 }
50011diff -urNp linux-2.6.32.48/fs/splice.c linux-2.6.32.48/fs/splice.c
50012--- linux-2.6.32.48/fs/splice.c 2011-11-08 19:02:43.000000000 -0500
50013+++ linux-2.6.32.48/fs/splice.c 2011-11-15 19:59:43.000000000 -0500
50014@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
50015 pipe_lock(pipe);
50016
50017 for (;;) {
50018- if (!pipe->readers) {
50019+ if (!atomic_read(&pipe->readers)) {
50020 send_sig(SIGPIPE, current, 0);
50021 if (!ret)
50022 ret = -EPIPE;
50023@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
50024 do_wakeup = 0;
50025 }
50026
50027- pipe->waiting_writers++;
50028+ atomic_inc(&pipe->waiting_writers);
50029 pipe_wait(pipe);
50030- pipe->waiting_writers--;
50031+ atomic_dec(&pipe->waiting_writers);
50032 }
50033
50034 pipe_unlock(pipe);
50035@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
50036 .spd_release = spd_release_page,
50037 };
50038
50039+ pax_track_stack();
50040+
50041 index = *ppos >> PAGE_CACHE_SHIFT;
50042 loff = *ppos & ~PAGE_CACHE_MASK;
50043 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
50044@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
50045 old_fs = get_fs();
50046 set_fs(get_ds());
50047 /* The cast to a user pointer is valid due to the set_fs() */
50048- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
50049+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
50050 set_fs(old_fs);
50051
50052 return res;
50053@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
50054 old_fs = get_fs();
50055 set_fs(get_ds());
50056 /* The cast to a user pointer is valid due to the set_fs() */
50057- res = vfs_write(file, (const char __user *)buf, count, &pos);
50058+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
50059 set_fs(old_fs);
50060
50061 return res;
50062@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
50063 .spd_release = spd_release_page,
50064 };
50065
50066+ pax_track_stack();
50067+
50068 index = *ppos >> PAGE_CACHE_SHIFT;
50069 offset = *ppos & ~PAGE_CACHE_MASK;
50070 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
50071@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
50072 goto err;
50073
50074 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
50075- vec[i].iov_base = (void __user *) page_address(page);
50076+ vec[i].iov_base = (__force void __user *) page_address(page);
50077 vec[i].iov_len = this_len;
50078 pages[i] = page;
50079 spd.nr_pages++;
50080@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
50081 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
50082 {
50083 while (!pipe->nrbufs) {
50084- if (!pipe->writers)
50085+ if (!atomic_read(&pipe->writers))
50086 return 0;
50087
50088- if (!pipe->waiting_writers && sd->num_spliced)
50089+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
50090 return 0;
50091
50092 if (sd->flags & SPLICE_F_NONBLOCK)
50093@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
50094 * out of the pipe right after the splice_to_pipe(). So set
50095 * PIPE_READERS appropriately.
50096 */
50097- pipe->readers = 1;
50098+ atomic_set(&pipe->readers, 1);
50099
50100 current->splice_pipe = pipe;
50101 }
50102@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file
50103 .spd_release = spd_release_page,
50104 };
50105
50106+ pax_track_stack();
50107+
50108 pipe = pipe_info(file->f_path.dentry->d_inode);
50109 if (!pipe)
50110 return -EBADF;
50111@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_
50112 ret = -ERESTARTSYS;
50113 break;
50114 }
50115- if (!pipe->writers)
50116+ if (!atomic_read(&pipe->writers))
50117 break;
50118- if (!pipe->waiting_writers) {
50119+ if (!atomic_read(&pipe->waiting_writers)) {
50120 if (flags & SPLICE_F_NONBLOCK) {
50121 ret = -EAGAIN;
50122 break;
50123@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_
50124 pipe_lock(pipe);
50125
50126 while (pipe->nrbufs >= PIPE_BUFFERS) {
50127- if (!pipe->readers) {
50128+ if (!atomic_read(&pipe->readers)) {
50129 send_sig(SIGPIPE, current, 0);
50130 ret = -EPIPE;
50131 break;
50132@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_
50133 ret = -ERESTARTSYS;
50134 break;
50135 }
50136- pipe->waiting_writers++;
50137+ atomic_inc(&pipe->waiting_writers);
50138 pipe_wait(pipe);
50139- pipe->waiting_writers--;
50140+ atomic_dec(&pipe->waiting_writers);
50141 }
50142
50143 pipe_unlock(pipe);
50144@@ -1786,14 +1792,14 @@ retry:
50145 pipe_double_lock(ipipe, opipe);
50146
50147 do {
50148- if (!opipe->readers) {
50149+ if (!atomic_read(&opipe->readers)) {
50150 send_sig(SIGPIPE, current, 0);
50151 if (!ret)
50152 ret = -EPIPE;
50153 break;
50154 }
50155
50156- if (!ipipe->nrbufs && !ipipe->writers)
50157+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
50158 break;
50159
50160 /*
50161@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_i
50162 pipe_double_lock(ipipe, opipe);
50163
50164 do {
50165- if (!opipe->readers) {
50166+ if (!atomic_read(&opipe->readers)) {
50167 send_sig(SIGPIPE, current, 0);
50168 if (!ret)
50169 ret = -EPIPE;
50170@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_i
50171 * return EAGAIN if we have the potential of some data in the
50172 * future, otherwise just return 0
50173 */
50174- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
50175+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
50176 ret = -EAGAIN;
50177
50178 pipe_unlock(ipipe);
50179diff -urNp linux-2.6.32.48/fs/sysfs/file.c linux-2.6.32.48/fs/sysfs/file.c
50180--- linux-2.6.32.48/fs/sysfs/file.c 2011-11-08 19:02:43.000000000 -0500
50181+++ linux-2.6.32.48/fs/sysfs/file.c 2011-11-15 19:59:43.000000000 -0500
50182@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
50183
50184 struct sysfs_open_dirent {
50185 atomic_t refcnt;
50186- atomic_t event;
50187+ atomic_unchecked_t event;
50188 wait_queue_head_t poll;
50189 struct list_head buffers; /* goes through sysfs_buffer.list */
50190 };
50191@@ -53,7 +53,7 @@ struct sysfs_buffer {
50192 size_t count;
50193 loff_t pos;
50194 char * page;
50195- struct sysfs_ops * ops;
50196+ const struct sysfs_ops * ops;
50197 struct mutex mutex;
50198 int needs_read_fill;
50199 int event;
50200@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
50201 {
50202 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
50203 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
50204- struct sysfs_ops * ops = buffer->ops;
50205+ const struct sysfs_ops * ops = buffer->ops;
50206 int ret = 0;
50207 ssize_t count;
50208
50209@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
50210 if (!sysfs_get_active_two(attr_sd))
50211 return -ENODEV;
50212
50213- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50214+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50215 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50216
50217 sysfs_put_active_two(attr_sd);
50218@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
50219 {
50220 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
50221 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
50222- struct sysfs_ops * ops = buffer->ops;
50223+ const struct sysfs_ops * ops = buffer->ops;
50224 int rc;
50225
50226 /* need attr_sd for attr and ops, its parent for kobj */
50227@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
50228 return -ENOMEM;
50229
50230 atomic_set(&new_od->refcnt, 0);
50231- atomic_set(&new_od->event, 1);
50232+ atomic_set_unchecked(&new_od->event, 1);
50233 init_waitqueue_head(&new_od->poll);
50234 INIT_LIST_HEAD(&new_od->buffers);
50235 goto retry;
50236@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
50237 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
50238 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
50239 struct sysfs_buffer *buffer;
50240- struct sysfs_ops *ops;
50241+ const struct sysfs_ops *ops;
50242 int error = -EACCES;
50243 char *p;
50244
50245@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
50246
50247 sysfs_put_active_two(attr_sd);
50248
50249- if (buffer->event != atomic_read(&od->event))
50250+ if (buffer->event != atomic_read_unchecked(&od->event))
50251 goto trigger;
50252
50253 return DEFAULT_POLLMASK;
50254@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
50255
50256 od = sd->s_attr.open;
50257 if (od) {
50258- atomic_inc(&od->event);
50259+ atomic_inc_unchecked(&od->event);
50260 wake_up_interruptible(&od->poll);
50261 }
50262
50263diff -urNp linux-2.6.32.48/fs/sysfs/mount.c linux-2.6.32.48/fs/sysfs/mount.c
50264--- linux-2.6.32.48/fs/sysfs/mount.c 2011-11-08 19:02:43.000000000 -0500
50265+++ linux-2.6.32.48/fs/sysfs/mount.c 2011-11-15 19:59:43.000000000 -0500
50266@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
50267 .s_name = "",
50268 .s_count = ATOMIC_INIT(1),
50269 .s_flags = SYSFS_DIR,
50270+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50271+ .s_mode = S_IFDIR | S_IRWXU,
50272+#else
50273 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50274+#endif
50275 .s_ino = 1,
50276 };
50277
50278diff -urNp linux-2.6.32.48/fs/sysfs/symlink.c linux-2.6.32.48/fs/sysfs/symlink.c
50279--- linux-2.6.32.48/fs/sysfs/symlink.c 2011-11-08 19:02:43.000000000 -0500
50280+++ linux-2.6.32.48/fs/sysfs/symlink.c 2011-11-15 19:59:43.000000000 -0500
50281@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
50282
50283 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50284 {
50285- char *page = nd_get_link(nd);
50286+ const char *page = nd_get_link(nd);
50287 if (!IS_ERR(page))
50288 free_page((unsigned long)page);
50289 }
50290diff -urNp linux-2.6.32.48/fs/udf/balloc.c linux-2.6.32.48/fs/udf/balloc.c
50291--- linux-2.6.32.48/fs/udf/balloc.c 2011-11-08 19:02:43.000000000 -0500
50292+++ linux-2.6.32.48/fs/udf/balloc.c 2011-11-15 19:59:43.000000000 -0500
50293@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
50294
50295 mutex_lock(&sbi->s_alloc_mutex);
50296 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
50297- if (bloc->logicalBlockNum < 0 ||
50298- (bloc->logicalBlockNum + count) >
50299- partmap->s_partition_len) {
50300+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
50301 udf_debug("%d < %d || %d + %d > %d\n",
50302 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
50303 count, partmap->s_partition_len);
50304@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
50305
50306 mutex_lock(&sbi->s_alloc_mutex);
50307 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
50308- if (bloc->logicalBlockNum < 0 ||
50309- (bloc->logicalBlockNum + count) >
50310- partmap->s_partition_len) {
50311+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
50312 udf_debug("%d < %d || %d + %d > %d\n",
50313 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
50314 partmap->s_partition_len);
50315diff -urNp linux-2.6.32.48/fs/udf/inode.c linux-2.6.32.48/fs/udf/inode.c
50316--- linux-2.6.32.48/fs/udf/inode.c 2011-11-08 19:02:43.000000000 -0500
50317+++ linux-2.6.32.48/fs/udf/inode.c 2011-11-15 19:59:43.000000000 -0500
50318@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
50319 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
50320 int lastblock = 0;
50321
50322+ pax_track_stack();
50323+
50324 prev_epos.offset = udf_file_entry_alloc_offset(inode);
50325 prev_epos.block = iinfo->i_location;
50326 prev_epos.bh = NULL;
50327diff -urNp linux-2.6.32.48/fs/udf/misc.c linux-2.6.32.48/fs/udf/misc.c
50328--- linux-2.6.32.48/fs/udf/misc.c 2011-11-08 19:02:43.000000000 -0500
50329+++ linux-2.6.32.48/fs/udf/misc.c 2011-11-15 19:59:43.000000000 -0500
50330@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
50331
50332 u8 udf_tag_checksum(const struct tag *t)
50333 {
50334- u8 *data = (u8 *)t;
50335+ const u8 *data = (const u8 *)t;
50336 u8 checksum = 0;
50337 int i;
50338 for (i = 0; i < sizeof(struct tag); ++i)
50339diff -urNp linux-2.6.32.48/fs/utimes.c linux-2.6.32.48/fs/utimes.c
50340--- linux-2.6.32.48/fs/utimes.c 2011-11-08 19:02:43.000000000 -0500
50341+++ linux-2.6.32.48/fs/utimes.c 2011-11-15 19:59:43.000000000 -0500
50342@@ -1,6 +1,7 @@
50343 #include <linux/compiler.h>
50344 #include <linux/file.h>
50345 #include <linux/fs.h>
50346+#include <linux/security.h>
50347 #include <linux/linkage.h>
50348 #include <linux/mount.h>
50349 #include <linux/namei.h>
50350@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
50351 goto mnt_drop_write_and_out;
50352 }
50353 }
50354+
50355+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50356+ error = -EACCES;
50357+ goto mnt_drop_write_and_out;
50358+ }
50359+
50360 mutex_lock(&inode->i_mutex);
50361 error = notify_change(path->dentry, &newattrs);
50362 mutex_unlock(&inode->i_mutex);
50363diff -urNp linux-2.6.32.48/fs/xattr_acl.c linux-2.6.32.48/fs/xattr_acl.c
50364--- linux-2.6.32.48/fs/xattr_acl.c 2011-11-08 19:02:43.000000000 -0500
50365+++ linux-2.6.32.48/fs/xattr_acl.c 2011-11-15 19:59:43.000000000 -0500
50366@@ -17,8 +17,8 @@
50367 struct posix_acl *
50368 posix_acl_from_xattr(const void *value, size_t size)
50369 {
50370- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50371- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50372+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50373+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50374 int count;
50375 struct posix_acl *acl;
50376 struct posix_acl_entry *acl_e;
50377diff -urNp linux-2.6.32.48/fs/xattr.c linux-2.6.32.48/fs/xattr.c
50378--- linux-2.6.32.48/fs/xattr.c 2011-11-08 19:02:43.000000000 -0500
50379+++ linux-2.6.32.48/fs/xattr.c 2011-11-15 19:59:43.000000000 -0500
50380@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50381 * Extended attribute SET operations
50382 */
50383 static long
50384-setxattr(struct dentry *d, const char __user *name, const void __user *value,
50385+setxattr(struct path *path, const char __user *name, const void __user *value,
50386 size_t size, int flags)
50387 {
50388 int error;
50389@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
50390 return PTR_ERR(kvalue);
50391 }
50392
50393- error = vfs_setxattr(d, kname, kvalue, size, flags);
50394+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50395+ error = -EACCES;
50396+ goto out;
50397+ }
50398+
50399+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50400+out:
50401 kfree(kvalue);
50402 return error;
50403 }
50404@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
50405 return error;
50406 error = mnt_want_write(path.mnt);
50407 if (!error) {
50408- error = setxattr(path.dentry, name, value, size, flags);
50409+ error = setxattr(&path, name, value, size, flags);
50410 mnt_drop_write(path.mnt);
50411 }
50412 path_put(&path);
50413@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
50414 return error;
50415 error = mnt_want_write(path.mnt);
50416 if (!error) {
50417- error = setxattr(path.dentry, name, value, size, flags);
50418+ error = setxattr(&path, name, value, size, flags);
50419 mnt_drop_write(path.mnt);
50420 }
50421 path_put(&path);
50422@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
50423 const void __user *,value, size_t, size, int, flags)
50424 {
50425 struct file *f;
50426- struct dentry *dentry;
50427 int error = -EBADF;
50428
50429 f = fget(fd);
50430 if (!f)
50431 return error;
50432- dentry = f->f_path.dentry;
50433- audit_inode(NULL, dentry);
50434+ audit_inode(NULL, f->f_path.dentry);
50435 error = mnt_want_write_file(f);
50436 if (!error) {
50437- error = setxattr(dentry, name, value, size, flags);
50438+ error = setxattr(&f->f_path, name, value, size, flags);
50439 mnt_drop_write(f->f_path.mnt);
50440 }
50441 fput(f);
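Note on the fs/xattr.c hunks above: setxattr() is changed to take a struct path instead of a bare dentry so the added grsecurity check can see both the dentry and the vfsmount it was reached through before vfs_setxattr() runs. The fragment below is a minimal illustrative sketch of that pattern only, not part of the patch; example_xattr_hook() and example_setxattr() are invented names standing in for the real helpers.

#include <linux/path.h>    /* struct path: { struct vfsmount *mnt; struct dentry *dentry; } */
#include <linux/xattr.h>   /* vfs_setxattr() */
#include <linux/errno.h>

/* Hypothetical policy hook; returns non-zero when the write is allowed. */
static int example_xattr_hook(struct dentry *dentry, struct vfsmount *mnt)
{
	return 1;	/* allow everything in this sketch */
}

static long example_setxattr(struct path *path, const char *kname,
			     const void *kvalue, size_t size, int flags)
{
	/* the mount is needed as well as the dentry, which is why the
	   real change threads struct path down from the syscalls */
	if (!example_xattr_hook(path->dentry, path->mnt))
		return -EACCES;

	return vfs_setxattr(path->dentry, kname, kvalue, size, flags);
}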
50442diff -urNp linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl32.c
50443--- linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-11-08 19:02:43.000000000 -0500
50444+++ linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-11-15 19:59:43.000000000 -0500
50445@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
50446 xfs_fsop_geom_t fsgeo;
50447 int error;
50448
50449+ memset(&fsgeo, 0, sizeof(fsgeo));
50450 error = xfs_fs_geometry(mp, &fsgeo, 3);
50451 if (error)
50452 return -error;
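For context on the one-line memset added above: a structure built on the kernel stack and copied out wholesale can leak stale stack bytes through compiler padding or through fields the fill routine never writes, so the whole object is zeroed first. A minimal sketch of the pattern, using a made-up struct and helper rather than the real xfs_fsop_geom_t:

#include <linux/types.h>
#include <linux/string.h>   /* memset() */
#include <linux/uaccess.h>  /* copy_to_user() */

struct example_geom {
	u32 blocksize;
	u16 version;        /* padding bytes typically follow here */
	u64 datablocks;
};

static int example_fill_geom(void __user *arg)
{
	struct example_geom g;

	memset(&g, 0, sizeof(g));	/* clear padding and any unwritten fields */
	g.blocksize  = 4096;
	g.version    = 3;
	g.datablocks = 1024;

	if (copy_to_user(arg, &g, sizeof(g)))
		return -EFAULT;
	return 0;
}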
50453diff -urNp linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl.c
50454--- linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl.c 2011-11-08 19:02:43.000000000 -0500
50455+++ linux-2.6.32.48/fs/xfs/linux-2.6/xfs_ioctl.c 2011-11-15 19:59:43.000000000 -0500
50456@@ -134,7 +134,7 @@ xfs_find_handle(
50457 }
50458
50459 error = -EFAULT;
50460- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50461+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50462 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50463 goto out_put;
50464
50465@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
50466 if (IS_ERR(dentry))
50467 return PTR_ERR(dentry);
50468
50469- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
50470+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
50471 if (!kbuf)
50472 goto out_dput;
50473
50474@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
50475 xfs_mount_t *mp,
50476 void __user *arg)
50477 {
50478- xfs_fsop_geom_t fsgeo;
50479+ xfs_fsop_geom_t fsgeo;
50480 int error;
50481
50482 error = xfs_fs_geometry(mp, &fsgeo, 3);
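Two related hardening patterns appear in the xfs_ioctl.c hunks above: a caller-supplied length is checked against the size of the kernel object before copy_to_user(), and a buffer that may later be copied back only partially filled is allocated with kzalloc() rather than kmalloc(). Below is a hedged sketch of the length check alone; the type and function names are invented for illustration.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

typedef u64 example_handle_t;	/* stand-in for the real handle object */

static int example_copy_handle(void __user *dst, u32 user_len)
{
	example_handle_t handle = 0;

	/* never copy more than the kernel object actually holds */
	if (user_len > sizeof(handle))
		return -EINVAL;

	if (copy_to_user(dst, &handle, user_len))
		return -EFAULT;
	return 0;
}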
50483diff -urNp linux-2.6.32.48/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.48/fs/xfs/linux-2.6/xfs_iops.c
50484--- linux-2.6.32.48/fs/xfs/linux-2.6/xfs_iops.c 2011-11-08 19:02:43.000000000 -0500
50485+++ linux-2.6.32.48/fs/xfs/linux-2.6/xfs_iops.c 2011-11-15 19:59:43.000000000 -0500
50486@@ -468,7 +468,7 @@ xfs_vn_put_link(
50487 struct nameidata *nd,
50488 void *p)
50489 {
50490- char *s = nd_get_link(nd);
50491+ const char *s = nd_get_link(nd);
50492
50493 if (!IS_ERR(s))
50494 kfree(s);
50495diff -urNp linux-2.6.32.48/fs/xfs/xfs_bmap.c linux-2.6.32.48/fs/xfs/xfs_bmap.c
50496--- linux-2.6.32.48/fs/xfs/xfs_bmap.c 2011-11-08 19:02:43.000000000 -0500
50497+++ linux-2.6.32.48/fs/xfs/xfs_bmap.c 2011-11-15 19:59:43.000000000 -0500
50498@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
50499 int nmap,
50500 int ret_nmap);
50501 #else
50502-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50503+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50504 #endif /* DEBUG */
50505
50506 #if defined(XFS_RW_TRACE)
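The xfs_bmap.c hunk above turns an empty debug macro into "do {} while (0)". The point, sketched below with a made-up macro, is that the no-op expansion stays a single well-formed C statement: a bare empty expansion leaves only a stray semicolon at the call site, which trips empty-body warnings under if/while, whereas the do/while form always behaves like one statement.

/* Illustrative only; trace_extent is not a real XFS macro. */
extern void example_trace(int bno, int len);

#ifdef EXAMPLE_DEBUG
#define trace_extent(bno, len)	example_trace(bno, len)
#else
#define trace_extent(bno, len)	do {} while (0)	/* still one statement */
#endif

void example_use(int bno, int len, int cond)
{
	if (cond)
		trace_extent(bno, len);	/* expands cleanly in the non-debug build */
	else
		len = 0;
	(void)len;
}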
50507diff -urNp linux-2.6.32.48/fs/xfs/xfs_dir2_sf.c linux-2.6.32.48/fs/xfs/xfs_dir2_sf.c
50508--- linux-2.6.32.48/fs/xfs/xfs_dir2_sf.c 2011-11-08 19:02:43.000000000 -0500
50509+++ linux-2.6.32.48/fs/xfs/xfs_dir2_sf.c 2011-11-15 19:59:43.000000000 -0500
50510@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
50511 }
50512
50513 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
50514- if (filldir(dirent, sfep->name, sfep->namelen,
50515+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50516+ char name[sfep->namelen];
50517+ memcpy(name, sfep->name, sfep->namelen);
50518+ if (filldir(dirent, name, sfep->namelen,
50519+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
50520+ *offset = off & 0x7fffffff;
50521+ return 0;
50522+ }
50523+ } else if (filldir(dirent, sfep->name, sfep->namelen,
50524 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50525 *offset = off & 0x7fffffff;
50526 return 0;
50527diff -urNp linux-2.6.32.48/grsecurity/gracl_alloc.c linux-2.6.32.48/grsecurity/gracl_alloc.c
50528--- linux-2.6.32.48/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
50529+++ linux-2.6.32.48/grsecurity/gracl_alloc.c 2011-11-15 19:59:43.000000000 -0500
50530@@ -0,0 +1,105 @@
50531+#include <linux/kernel.h>
50532+#include <linux/mm.h>
50533+#include <linux/slab.h>
50534+#include <linux/vmalloc.h>
50535+#include <linux/gracl.h>
50536+#include <linux/grsecurity.h>
50537+
50538+static unsigned long alloc_stack_next = 1;
50539+static unsigned long alloc_stack_size = 1;
50540+static void **alloc_stack;
50541+
50542+static __inline__ int
50543+alloc_pop(void)
50544+{
50545+ if (alloc_stack_next == 1)
50546+ return 0;
50547+
50548+ kfree(alloc_stack[alloc_stack_next - 2]);
50549+
50550+ alloc_stack_next--;
50551+
50552+ return 1;
50553+}
50554+
50555+static __inline__ int
50556+alloc_push(void *buf)
50557+{
50558+ if (alloc_stack_next >= alloc_stack_size)
50559+ return 1;
50560+
50561+ alloc_stack[alloc_stack_next - 1] = buf;
50562+
50563+ alloc_stack_next++;
50564+
50565+ return 0;
50566+}
50567+
50568+void *
50569+acl_alloc(unsigned long len)
50570+{
50571+ void *ret = NULL;
50572+
50573+ if (!len || len > PAGE_SIZE)
50574+ goto out;
50575+
50576+ ret = kmalloc(len, GFP_KERNEL);
50577+
50578+ if (ret) {
50579+ if (alloc_push(ret)) {
50580+ kfree(ret);
50581+ ret = NULL;
50582+ }
50583+ }
50584+
50585+out:
50586+ return ret;
50587+}
50588+
50589+void *
50590+acl_alloc_num(unsigned long num, unsigned long len)
50591+{
50592+ if (!len || (num > (PAGE_SIZE / len)))
50593+ return NULL;
50594+
50595+ return acl_alloc(num * len);
50596+}
50597+
50598+void
50599+acl_free_all(void)
50600+{
50601+ if (gr_acl_is_enabled() || !alloc_stack)
50602+ return;
50603+
50604+ while (alloc_pop()) ;
50605+
50606+ if (alloc_stack) {
50607+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
50608+ kfree(alloc_stack);
50609+ else
50610+ vfree(alloc_stack);
50611+ }
50612+
50613+ alloc_stack = NULL;
50614+ alloc_stack_size = 1;
50615+ alloc_stack_next = 1;
50616+
50617+ return;
50618+}
50619+
50620+int
50621+acl_alloc_stack_init(unsigned long size)
50622+{
50623+ if ((size * sizeof (void *)) <= PAGE_SIZE)
50624+ alloc_stack =
50625+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
50626+ else
50627+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
50628+
50629+ alloc_stack_size = size;
50630+
50631+ if (!alloc_stack)
50632+ return 0;
50633+ else
50634+ return 1;
50635+}
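grsecurity/gracl_alloc.c above implements a small bookkeeping allocator: every acl_alloc()/acl_alloc_num() result is pushed onto a pointer stack sized up front by acl_alloc_stack_init(), so the whole policy can later be torn down with a single acl_free_all(). The fragment below is a minimal usage sketch of those functions exactly as defined above (prototypes come from linux/gracl.h in the real tree); the caller and the sizes are hypothetical.

#include <linux/errno.h>

/* Hypothetical loader built on the allocator above. */
static int example_load_policy(unsigned long num_pointers)
{
	void *subjects, *objects;

	/* acl_alloc_stack_init() returns 0 when the pointer stack itself
	   cannot be allocated */
	if (!acl_alloc_stack_init(num_pointers + 5))
		return -ENOMEM;

	subjects = acl_alloc(256);		/* single allocation <= PAGE_SIZE */
	objects  = acl_alloc_num(16, 32);	/* 16 elements of 32 bytes */
	if (!subjects || !objects) {
		acl_free_all();			/* releases everything pushed so far */
		return -ENOMEM;
	}

	/* ... populate policy structures in the tracked buffers ... */
	return 0;
}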
50636diff -urNp linux-2.6.32.48/grsecurity/gracl.c linux-2.6.32.48/grsecurity/gracl.c
50637--- linux-2.6.32.48/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
50638+++ linux-2.6.32.48/grsecurity/gracl.c 2011-11-16 17:55:28.000000000 -0500
50639@@ -0,0 +1,4141 @@
50640+#include <linux/kernel.h>
50641+#include <linux/module.h>
50642+#include <linux/sched.h>
50643+#include <linux/mm.h>
50644+#include <linux/file.h>
50645+#include <linux/fs.h>
50646+#include <linux/namei.h>
50647+#include <linux/mount.h>
50648+#include <linux/tty.h>
50649+#include <linux/proc_fs.h>
50650+#include <linux/smp_lock.h>
50651+#include <linux/slab.h>
50652+#include <linux/vmalloc.h>
50653+#include <linux/types.h>
50654+#include <linux/sysctl.h>
50655+#include <linux/netdevice.h>
50656+#include <linux/ptrace.h>
50657+#include <linux/gracl.h>
50658+#include <linux/gralloc.h>
50659+#include <linux/grsecurity.h>
50660+#include <linux/grinternal.h>
50661+#include <linux/pid_namespace.h>
50662+#include <linux/fdtable.h>
50663+#include <linux/percpu.h>
50664+
50665+#include <asm/uaccess.h>
50666+#include <asm/errno.h>
50667+#include <asm/mman.h>
50668+
50669+static struct acl_role_db acl_role_set;
50670+static struct name_db name_set;
50671+static struct inodev_db inodev_set;
50672+
50673+/* for keeping track of userspace pointers used for subjects, so we
50674+ can share references in the kernel as well
50675+*/
50676+
50677+static struct dentry *real_root;
50678+static struct vfsmount *real_root_mnt;
50679+
50680+static struct acl_subj_map_db subj_map_set;
50681+
50682+static struct acl_role_label *default_role;
50683+
50684+static struct acl_role_label *role_list;
50685+
50686+static u16 acl_sp_role_value;
50687+
50688+extern char *gr_shared_page[4];
50689+static DEFINE_MUTEX(gr_dev_mutex);
50690+DEFINE_RWLOCK(gr_inode_lock);
50691+
50692+struct gr_arg *gr_usermode;
50693+
50694+static unsigned int gr_status __read_only = GR_STATUS_INIT;
50695+
50696+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50697+extern void gr_clear_learn_entries(void);
50698+
50699+#ifdef CONFIG_GRKERNSEC_RESLOG
50700+extern void gr_log_resource(const struct task_struct *task,
50701+ const int res, const unsigned long wanted, const int gt);
50702+#endif
50703+
50704+unsigned char *gr_system_salt;
50705+unsigned char *gr_system_sum;
50706+
50707+static struct sprole_pw **acl_special_roles = NULL;
50708+static __u16 num_sprole_pws = 0;
50709+
50710+static struct acl_role_label *kernel_role = NULL;
50711+
50712+static unsigned int gr_auth_attempts = 0;
50713+static unsigned long gr_auth_expires = 0UL;
50714+
50715+#ifdef CONFIG_NET
50716+extern struct vfsmount *sock_mnt;
50717+#endif
50718+extern struct vfsmount *pipe_mnt;
50719+extern struct vfsmount *shm_mnt;
50720+#ifdef CONFIG_HUGETLBFS
50721+extern struct vfsmount *hugetlbfs_vfsmount;
50722+#endif
50723+
50724+static struct acl_object_label *fakefs_obj_rw;
50725+static struct acl_object_label *fakefs_obj_rwx;
50726+
50727+extern int gr_init_uidset(void);
50728+extern void gr_free_uidset(void);
50729+extern void gr_remove_uid(uid_t uid);
50730+extern int gr_find_uid(uid_t uid);
50731+
50732+__inline__ int
50733+gr_acl_is_enabled(void)
50734+{
50735+ return (gr_status & GR_READY);
50736+}
50737+
50738+#ifdef CONFIG_BTRFS_FS
50739+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50740+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50741+#endif
50742+
50743+static inline dev_t __get_dev(const struct dentry *dentry)
50744+{
50745+#ifdef CONFIG_BTRFS_FS
50746+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50747+ return get_btrfs_dev_from_inode(dentry->d_inode);
50748+ else
50749+#endif
50750+ return dentry->d_inode->i_sb->s_dev;
50751+}
50752+
50753+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50754+{
50755+ return __get_dev(dentry);
50756+}
50757+
50758+static char gr_task_roletype_to_char(struct task_struct *task)
50759+{
50760+ switch (task->role->roletype &
50761+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50762+ GR_ROLE_SPECIAL)) {
50763+ case GR_ROLE_DEFAULT:
50764+ return 'D';
50765+ case GR_ROLE_USER:
50766+ return 'U';
50767+ case GR_ROLE_GROUP:
50768+ return 'G';
50769+ case GR_ROLE_SPECIAL:
50770+ return 'S';
50771+ }
50772+
50773+ return 'X';
50774+}
50775+
50776+char gr_roletype_to_char(void)
50777+{
50778+ return gr_task_roletype_to_char(current);
50779+}
50780+
50781+__inline__ int
50782+gr_acl_tpe_check(void)
50783+{
50784+ if (unlikely(!(gr_status & GR_READY)))
50785+ return 0;
50786+ if (current->role->roletype & GR_ROLE_TPE)
50787+ return 1;
50788+ else
50789+ return 0;
50790+}
50791+
50792+int
50793+gr_handle_rawio(const struct inode *inode)
50794+{
50795+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50796+ if (inode && S_ISBLK(inode->i_mode) &&
50797+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50798+ !capable(CAP_SYS_RAWIO))
50799+ return 1;
50800+#endif
50801+ return 0;
50802+}
50803+
50804+static int
50805+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50806+{
50807+ if (likely(lena != lenb))
50808+ return 0;
50809+
50810+ return !memcmp(a, b, lena);
50811+}
50812+
50813+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50814+{
50815+ *buflen -= namelen;
50816+ if (*buflen < 0)
50817+ return -ENAMETOOLONG;
50818+ *buffer -= namelen;
50819+ memcpy(*buffer, str, namelen);
50820+ return 0;
50821+}
50822+
50823+/* this must be called with vfsmount_lock and dcache_lock held */
50824+
50825+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
50826+ struct dentry *root, struct vfsmount *rootmnt,
50827+ char *buffer, int buflen)
50828+{
50829+ char * end = buffer+buflen;
50830+ char * retval;
50831+ int namelen;
50832+
50833+ *--end = '\0';
50834+ buflen--;
50835+
50836+ if (buflen < 1)
50837+ goto Elong;
50838+ /* Get '/' right */
50839+ retval = end-1;
50840+ *retval = '/';
50841+
50842+ for (;;) {
50843+ struct dentry * parent;
50844+
50845+ if (dentry == root && vfsmnt == rootmnt)
50846+ break;
50847+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50848+ /* Global root? */
50849+ if (vfsmnt->mnt_parent == vfsmnt)
50850+ goto global_root;
50851+ dentry = vfsmnt->mnt_mountpoint;
50852+ vfsmnt = vfsmnt->mnt_parent;
50853+ continue;
50854+ }
50855+ parent = dentry->d_parent;
50856+ prefetch(parent);
50857+ namelen = dentry->d_name.len;
50858+ buflen -= namelen + 1;
50859+ if (buflen < 0)
50860+ goto Elong;
50861+ end -= namelen;
50862+ memcpy(end, dentry->d_name.name, namelen);
50863+ *--end = '/';
50864+ retval = end;
50865+ dentry = parent;
50866+ }
50867+
50868+out:
50869+ return retval;
50870+
50871+global_root:
50872+ namelen = dentry->d_name.len;
50873+ buflen -= namelen;
50874+ if (buflen < 0)
50875+ goto Elong;
50876+ retval -= namelen-1; /* hit the slash */
50877+ memcpy(retval, dentry->d_name.name, namelen);
50878+ goto out;
50879+Elong:
50880+ retval = ERR_PTR(-ENAMETOOLONG);
50881+ goto out;
50882+}
50883+
50884+static char *
50885+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
50886+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
50887+{
50888+ char *retval;
50889+
50890+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
50891+ if (unlikely(IS_ERR(retval)))
50892+ retval = strcpy(buf, "<path too long>");
50893+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50894+ retval[1] = '\0';
50895+
50896+ return retval;
50897+}
50898+
50899+static char *
50900+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50901+ char *buf, int buflen)
50902+{
50903+ char *res;
50904+
50905+ /* we can use real_root, real_root_mnt, because this is only called
50906+ by the RBAC system */
50907+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
50908+
50909+ return res;
50910+}
50911+
50912+static char *
50913+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50914+ char *buf, int buflen)
50915+{
50916+ char *res;
50917+ struct dentry *root;
50918+ struct vfsmount *rootmnt;
50919+ struct task_struct *reaper = &init_task;
50920+
50921+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
50922+ read_lock(&reaper->fs->lock);
50923+ root = dget(reaper->fs->root.dentry);
50924+ rootmnt = mntget(reaper->fs->root.mnt);
50925+ read_unlock(&reaper->fs->lock);
50926+
50927+ spin_lock(&dcache_lock);
50928+ spin_lock(&vfsmount_lock);
50929+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
50930+ spin_unlock(&vfsmount_lock);
50931+ spin_unlock(&dcache_lock);
50932+
50933+ dput(root);
50934+ mntput(rootmnt);
50935+ return res;
50936+}
50937+
50938+static char *
50939+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50940+{
50941+ char *ret;
50942+ spin_lock(&dcache_lock);
50943+ spin_lock(&vfsmount_lock);
50944+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50945+ PAGE_SIZE);
50946+ spin_unlock(&vfsmount_lock);
50947+ spin_unlock(&dcache_lock);
50948+ return ret;
50949+}
50950+
50951+static char *
50952+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50953+{
50954+ char *ret;
50955+ char *buf;
50956+ int buflen;
50957+
50958+ spin_lock(&dcache_lock);
50959+ spin_lock(&vfsmount_lock);
50960+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50961+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50962+ buflen = (int)(ret - buf);
50963+ if (buflen >= 5)
50964+ prepend(&ret, &buflen, "/proc", 5);
50965+ else
50966+ ret = strcpy(buf, "<path too long>");
50967+ spin_unlock(&vfsmount_lock);
50968+ spin_unlock(&dcache_lock);
50969+ return ret;
50970+}
50971+
50972+char *
50973+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50974+{
50975+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50976+ PAGE_SIZE);
50977+}
50978+
50979+char *
50980+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50981+{
50982+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50983+ PAGE_SIZE);
50984+}
50985+
50986+char *
50987+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50988+{
50989+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50990+ PAGE_SIZE);
50991+}
50992+
50993+char *
50994+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50995+{
50996+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50997+ PAGE_SIZE);
50998+}
50999+
51000+char *
51001+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51002+{
51003+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51004+ PAGE_SIZE);
51005+}
51006+
51007+__inline__ __u32
51008+to_gr_audit(const __u32 reqmode)
51009+{
51010+ /* masks off auditable permission flags, then shifts them to create
51011+ auditing flags, and adds the special case of append auditing if
51012+ we're requesting write */
51013+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51014+}
51015+
51016+struct acl_subject_label *
51017+lookup_subject_map(const struct acl_subject_label *userp)
51018+{
51019+ unsigned int index = shash(userp, subj_map_set.s_size);
51020+ struct subject_map *match;
51021+
51022+ match = subj_map_set.s_hash[index];
51023+
51024+ while (match && match->user != userp)
51025+ match = match->next;
51026+
51027+ if (match != NULL)
51028+ return match->kernel;
51029+ else
51030+ return NULL;
51031+}
51032+
51033+static void
51034+insert_subj_map_entry(struct subject_map *subjmap)
51035+{
51036+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51037+ struct subject_map **curr;
51038+
51039+ subjmap->prev = NULL;
51040+
51041+ curr = &subj_map_set.s_hash[index];
51042+ if (*curr != NULL)
51043+ (*curr)->prev = subjmap;
51044+
51045+ subjmap->next = *curr;
51046+ *curr = subjmap;
51047+
51048+ return;
51049+}
51050+
51051+static struct acl_role_label *
51052+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51053+ const gid_t gid)
51054+{
51055+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51056+ struct acl_role_label *match;
51057+ struct role_allowed_ip *ipp;
51058+ unsigned int x;
51059+ u32 curr_ip = task->signal->curr_ip;
51060+
51061+ task->signal->saved_ip = curr_ip;
51062+
51063+ match = acl_role_set.r_hash[index];
51064+
51065+ while (match) {
51066+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51067+ for (x = 0; x < match->domain_child_num; x++) {
51068+ if (match->domain_children[x] == uid)
51069+ goto found;
51070+ }
51071+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51072+ break;
51073+ match = match->next;
51074+ }
51075+found:
51076+ if (match == NULL) {
51077+ try_group:
51078+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51079+ match = acl_role_set.r_hash[index];
51080+
51081+ while (match) {
51082+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51083+ for (x = 0; x < match->domain_child_num; x++) {
51084+ if (match->domain_children[x] == gid)
51085+ goto found2;
51086+ }
51087+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51088+ break;
51089+ match = match->next;
51090+ }
51091+found2:
51092+ if (match == NULL)
51093+ match = default_role;
51094+ if (match->allowed_ips == NULL)
51095+ return match;
51096+ else {
51097+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51098+ if (likely
51099+ ((ntohl(curr_ip) & ipp->netmask) ==
51100+ (ntohl(ipp->addr) & ipp->netmask)))
51101+ return match;
51102+ }
51103+ match = default_role;
51104+ }
51105+ } else if (match->allowed_ips == NULL) {
51106+ return match;
51107+ } else {
51108+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51109+ if (likely
51110+ ((ntohl(curr_ip) & ipp->netmask) ==
51111+ (ntohl(ipp->addr) & ipp->netmask)))
51112+ return match;
51113+ }
51114+ goto try_group;
51115+ }
51116+
51117+ return match;
51118+}
51119+
51120+struct acl_subject_label *
51121+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51122+ const struct acl_role_label *role)
51123+{
51124+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51125+ struct acl_subject_label *match;
51126+
51127+ match = role->subj_hash[index];
51128+
51129+ while (match && (match->inode != ino || match->device != dev ||
51130+ (match->mode & GR_DELETED))) {
51131+ match = match->next;
51132+ }
51133+
51134+ if (match && !(match->mode & GR_DELETED))
51135+ return match;
51136+ else
51137+ return NULL;
51138+}
51139+
51140+struct acl_subject_label *
51141+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51142+ const struct acl_role_label *role)
51143+{
51144+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51145+ struct acl_subject_label *match;
51146+
51147+ match = role->subj_hash[index];
51148+
51149+ while (match && (match->inode != ino || match->device != dev ||
51150+ !(match->mode & GR_DELETED))) {
51151+ match = match->next;
51152+ }
51153+
51154+ if (match && (match->mode & GR_DELETED))
51155+ return match;
51156+ else
51157+ return NULL;
51158+}
51159+
51160+static struct acl_object_label *
51161+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51162+ const struct acl_subject_label *subj)
51163+{
51164+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51165+ struct acl_object_label *match;
51166+
51167+ match = subj->obj_hash[index];
51168+
51169+ while (match && (match->inode != ino || match->device != dev ||
51170+ (match->mode & GR_DELETED))) {
51171+ match = match->next;
51172+ }
51173+
51174+ if (match && !(match->mode & GR_DELETED))
51175+ return match;
51176+ else
51177+ return NULL;
51178+}
51179+
51180+static struct acl_object_label *
51181+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51182+ const struct acl_subject_label *subj)
51183+{
51184+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51185+ struct acl_object_label *match;
51186+
51187+ match = subj->obj_hash[index];
51188+
51189+ while (match && (match->inode != ino || match->device != dev ||
51190+ !(match->mode & GR_DELETED))) {
51191+ match = match->next;
51192+ }
51193+
51194+ if (match && (match->mode & GR_DELETED))
51195+ return match;
51196+
51197+ match = subj->obj_hash[index];
51198+
51199+ while (match && (match->inode != ino || match->device != dev ||
51200+ (match->mode & GR_DELETED))) {
51201+ match = match->next;
51202+ }
51203+
51204+ if (match && !(match->mode & GR_DELETED))
51205+ return match;
51206+ else
51207+ return NULL;
51208+}
51209+
51210+static struct name_entry *
51211+lookup_name_entry(const char *name)
51212+{
51213+ unsigned int len = strlen(name);
51214+ unsigned int key = full_name_hash(name, len);
51215+ unsigned int index = key % name_set.n_size;
51216+ struct name_entry *match;
51217+
51218+ match = name_set.n_hash[index];
51219+
51220+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51221+ match = match->next;
51222+
51223+ return match;
51224+}
51225+
51226+static struct name_entry *
51227+lookup_name_entry_create(const char *name)
51228+{
51229+ unsigned int len = strlen(name);
51230+ unsigned int key = full_name_hash(name, len);
51231+ unsigned int index = key % name_set.n_size;
51232+ struct name_entry *match;
51233+
51234+ match = name_set.n_hash[index];
51235+
51236+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51237+ !match->deleted))
51238+ match = match->next;
51239+
51240+ if (match && match->deleted)
51241+ return match;
51242+
51243+ match = name_set.n_hash[index];
51244+
51245+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51246+ match->deleted))
51247+ match = match->next;
51248+
51249+ if (match && !match->deleted)
51250+ return match;
51251+ else
51252+ return NULL;
51253+}
51254+
51255+static struct inodev_entry *
51256+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51257+{
51258+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51259+ struct inodev_entry *match;
51260+
51261+ match = inodev_set.i_hash[index];
51262+
51263+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51264+ match = match->next;
51265+
51266+ return match;
51267+}
51268+
51269+static void
51270+insert_inodev_entry(struct inodev_entry *entry)
51271+{
51272+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51273+ inodev_set.i_size);
51274+ struct inodev_entry **curr;
51275+
51276+ entry->prev = NULL;
51277+
51278+ curr = &inodev_set.i_hash[index];
51279+ if (*curr != NULL)
51280+ (*curr)->prev = entry;
51281+
51282+ entry->next = *curr;
51283+ *curr = entry;
51284+
51285+ return;
51286+}
51287+
51288+static void
51289+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51290+{
51291+ unsigned int index =
51292+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51293+ struct acl_role_label **curr;
51294+ struct acl_role_label *tmp;
51295+
51296+ curr = &acl_role_set.r_hash[index];
51297+
51298+ /* if role was already inserted due to domains and already has
51299+ a role in the same bucket as it attached, then we need to
51300+ combine these two buckets
51301+ */
51302+ if (role->next) {
51303+ tmp = role->next;
51304+ while (tmp->next)
51305+ tmp = tmp->next;
51306+ tmp->next = *curr;
51307+ } else
51308+ role->next = *curr;
51309+ *curr = role;
51310+
51311+ return;
51312+}
51313+
51314+static void
51315+insert_acl_role_label(struct acl_role_label *role)
51316+{
51317+ int i;
51318+
51319+ if (role_list == NULL) {
51320+ role_list = role;
51321+ role->prev = NULL;
51322+ } else {
51323+ role->prev = role_list;
51324+ role_list = role;
51325+ }
51326+
51327+ /* used for hash chains */
51328+ role->next = NULL;
51329+
51330+ if (role->roletype & GR_ROLE_DOMAIN) {
51331+ for (i = 0; i < role->domain_child_num; i++)
51332+ __insert_acl_role_label(role, role->domain_children[i]);
51333+ } else
51334+ __insert_acl_role_label(role, role->uidgid);
51335+}
51336+
51337+static int
51338+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51339+{
51340+ struct name_entry **curr, *nentry;
51341+ struct inodev_entry *ientry;
51342+ unsigned int len = strlen(name);
51343+ unsigned int key = full_name_hash(name, len);
51344+ unsigned int index = key % name_set.n_size;
51345+
51346+ curr = &name_set.n_hash[index];
51347+
51348+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51349+ curr = &((*curr)->next);
51350+
51351+ if (*curr != NULL)
51352+ return 1;
51353+
51354+ nentry = acl_alloc(sizeof (struct name_entry));
51355+ if (nentry == NULL)
51356+ return 0;
51357+ ientry = acl_alloc(sizeof (struct inodev_entry));
51358+ if (ientry == NULL)
51359+ return 0;
51360+ ientry->nentry = nentry;
51361+
51362+ nentry->key = key;
51363+ nentry->name = name;
51364+ nentry->inode = inode;
51365+ nentry->device = device;
51366+ nentry->len = len;
51367+ nentry->deleted = deleted;
51368+
51369+ nentry->prev = NULL;
51370+ curr = &name_set.n_hash[index];
51371+ if (*curr != NULL)
51372+ (*curr)->prev = nentry;
51373+ nentry->next = *curr;
51374+ *curr = nentry;
51375+
51376+ /* insert us into the table searchable by inode/dev */
51377+ insert_inodev_entry(ientry);
51378+
51379+ return 1;
51380+}
51381+
51382+static void
51383+insert_acl_obj_label(struct acl_object_label *obj,
51384+ struct acl_subject_label *subj)
51385+{
51386+ unsigned int index =
51387+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51388+ struct acl_object_label **curr;
51389+
51390+
51391+ obj->prev = NULL;
51392+
51393+ curr = &subj->obj_hash[index];
51394+ if (*curr != NULL)
51395+ (*curr)->prev = obj;
51396+
51397+ obj->next = *curr;
51398+ *curr = obj;
51399+
51400+ return;
51401+}
51402+
51403+static void
51404+insert_acl_subj_label(struct acl_subject_label *obj,
51405+ struct acl_role_label *role)
51406+{
51407+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51408+ struct acl_subject_label **curr;
51409+
51410+ obj->prev = NULL;
51411+
51412+ curr = &role->subj_hash[index];
51413+ if (*curr != NULL)
51414+ (*curr)->prev = obj;
51415+
51416+ obj->next = *curr;
51417+ *curr = obj;
51418+
51419+ return;
51420+}
51421+
51422+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51423+
51424+static void *
51425+create_table(__u32 * len, int elementsize)
51426+{
51427+ unsigned int table_sizes[] = {
51428+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51429+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51430+ 4194301, 8388593, 16777213, 33554393, 67108859
51431+ };
51432+ void *newtable = NULL;
51433+ unsigned int pwr = 0;
51434+
51435+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51436+ table_sizes[pwr] <= *len)
51437+ pwr++;
51438+
51439+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51440+ return newtable;
51441+
51442+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51443+ newtable =
51444+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51445+ else
51446+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51447+
51448+ *len = table_sizes[pwr];
51449+
51450+ return newtable;
51451+}
51452+
51453+static int
51454+init_variables(const struct gr_arg *arg)
51455+{
51456+ struct task_struct *reaper = &init_task;
51457+ unsigned int stacksize;
51458+
51459+ subj_map_set.s_size = arg->role_db.num_subjects;
51460+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51461+ name_set.n_size = arg->role_db.num_objects;
51462+ inodev_set.i_size = arg->role_db.num_objects;
51463+
51464+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51465+ !name_set.n_size || !inodev_set.i_size)
51466+ return 1;
51467+
51468+ if (!gr_init_uidset())
51469+ return 1;
51470+
51471+ /* set up the stack that holds allocation info */
51472+
51473+ stacksize = arg->role_db.num_pointers + 5;
51474+
51475+ if (!acl_alloc_stack_init(stacksize))
51476+ return 1;
51477+
51478+ /* grab reference for the real root dentry and vfsmount */
51479+ read_lock(&reaper->fs->lock);
51480+ real_root = dget(reaper->fs->root.dentry);
51481+ real_root_mnt = mntget(reaper->fs->root.mnt);
51482+ read_unlock(&reaper->fs->lock);
51483+
51484+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51485+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
51486+#endif
51487+
51488+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51489+ if (fakefs_obj_rw == NULL)
51490+ return 1;
51491+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51492+
51493+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51494+ if (fakefs_obj_rwx == NULL)
51495+ return 1;
51496+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51497+
51498+ subj_map_set.s_hash =
51499+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51500+ acl_role_set.r_hash =
51501+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51502+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51503+ inodev_set.i_hash =
51504+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51505+
51506+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51507+ !name_set.n_hash || !inodev_set.i_hash)
51508+ return 1;
51509+
51510+ memset(subj_map_set.s_hash, 0,
51511+ sizeof(struct subject_map *) * subj_map_set.s_size);
51512+ memset(acl_role_set.r_hash, 0,
51513+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51514+ memset(name_set.n_hash, 0,
51515+ sizeof (struct name_entry *) * name_set.n_size);
51516+ memset(inodev_set.i_hash, 0,
51517+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51518+
51519+ return 0;
51520+}
51521+
51522+/* free information not needed after startup
51523+ currently contains user->kernel pointer mappings for subjects
51524+*/
51525+
51526+static void
51527+free_init_variables(void)
51528+{
51529+ __u32 i;
51530+
51531+ if (subj_map_set.s_hash) {
51532+ for (i = 0; i < subj_map_set.s_size; i++) {
51533+ if (subj_map_set.s_hash[i]) {
51534+ kfree(subj_map_set.s_hash[i]);
51535+ subj_map_set.s_hash[i] = NULL;
51536+ }
51537+ }
51538+
51539+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51540+ PAGE_SIZE)
51541+ kfree(subj_map_set.s_hash);
51542+ else
51543+ vfree(subj_map_set.s_hash);
51544+ }
51545+
51546+ return;
51547+}
51548+
51549+static void
51550+free_variables(void)
51551+{
51552+ struct acl_subject_label *s;
51553+ struct acl_role_label *r;
51554+ struct task_struct *task, *task2;
51555+ unsigned int x;
51556+
51557+ gr_clear_learn_entries();
51558+
51559+ read_lock(&tasklist_lock);
51560+ do_each_thread(task2, task) {
51561+ task->acl_sp_role = 0;
51562+ task->acl_role_id = 0;
51563+ task->acl = NULL;
51564+ task->role = NULL;
51565+ } while_each_thread(task2, task);
51566+ read_unlock(&tasklist_lock);
51567+
51568+ /* release the reference to the real root dentry and vfsmount */
51569+ if (real_root)
51570+ dput(real_root);
51571+ real_root = NULL;
51572+ if (real_root_mnt)
51573+ mntput(real_root_mnt);
51574+ real_root_mnt = NULL;
51575+
51576+ /* free all object hash tables */
51577+
51578+ FOR_EACH_ROLE_START(r)
51579+ if (r->subj_hash == NULL)
51580+ goto next_role;
51581+ FOR_EACH_SUBJECT_START(r, s, x)
51582+ if (s->obj_hash == NULL)
51583+ break;
51584+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51585+ kfree(s->obj_hash);
51586+ else
51587+ vfree(s->obj_hash);
51588+ FOR_EACH_SUBJECT_END(s, x)
51589+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51590+ if (s->obj_hash == NULL)
51591+ break;
51592+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51593+ kfree(s->obj_hash);
51594+ else
51595+ vfree(s->obj_hash);
51596+ FOR_EACH_NESTED_SUBJECT_END(s)
51597+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51598+ kfree(r->subj_hash);
51599+ else
51600+ vfree(r->subj_hash);
51601+ r->subj_hash = NULL;
51602+next_role:
51603+ FOR_EACH_ROLE_END(r)
51604+
51605+ acl_free_all();
51606+
51607+ if (acl_role_set.r_hash) {
51608+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51609+ PAGE_SIZE)
51610+ kfree(acl_role_set.r_hash);
51611+ else
51612+ vfree(acl_role_set.r_hash);
51613+ }
51614+ if (name_set.n_hash) {
51615+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51616+ PAGE_SIZE)
51617+ kfree(name_set.n_hash);
51618+ else
51619+ vfree(name_set.n_hash);
51620+ }
51621+
51622+ if (inodev_set.i_hash) {
51623+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51624+ PAGE_SIZE)
51625+ kfree(inodev_set.i_hash);
51626+ else
51627+ vfree(inodev_set.i_hash);
51628+ }
51629+
51630+ gr_free_uidset();
51631+
51632+ memset(&name_set, 0, sizeof (struct name_db));
51633+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51634+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51635+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51636+
51637+ default_role = NULL;
51638+ role_list = NULL;
51639+
51640+ return;
51641+}
51642+
51643+static __u32
51644+count_user_objs(struct acl_object_label *userp)
51645+{
51646+ struct acl_object_label o_tmp;
51647+ __u32 num = 0;
51648+
51649+ while (userp) {
51650+ if (copy_from_user(&o_tmp, userp,
51651+ sizeof (struct acl_object_label)))
51652+ break;
51653+
51654+ userp = o_tmp.prev;
51655+ num++;
51656+ }
51657+
51658+ return num;
51659+}
51660+
51661+static struct acl_subject_label *
51662+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51663+
51664+static int
51665+copy_user_glob(struct acl_object_label *obj)
51666+{
51667+ struct acl_object_label *g_tmp, **guser;
51668+ unsigned int len;
51669+ char *tmp;
51670+
51671+ if (obj->globbed == NULL)
51672+ return 0;
51673+
51674+ guser = &obj->globbed;
51675+ while (*guser) {
51676+ g_tmp = (struct acl_object_label *)
51677+ acl_alloc(sizeof (struct acl_object_label));
51678+ if (g_tmp == NULL)
51679+ return -ENOMEM;
51680+
51681+ if (copy_from_user(g_tmp, *guser,
51682+ sizeof (struct acl_object_label)))
51683+ return -EFAULT;
51684+
51685+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51686+
51687+ if (!len || len >= PATH_MAX)
51688+ return -EINVAL;
51689+
51690+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51691+ return -ENOMEM;
51692+
51693+ if (copy_from_user(tmp, g_tmp->filename, len))
51694+ return -EFAULT;
51695+ tmp[len-1] = '\0';
51696+ g_tmp->filename = tmp;
51697+
51698+ *guser = g_tmp;
51699+ guser = &(g_tmp->next);
51700+ }
51701+
51702+ return 0;
51703+}
51704+
51705+static int
51706+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51707+ struct acl_role_label *role)
51708+{
51709+ struct acl_object_label *o_tmp;
51710+ unsigned int len;
51711+ int ret;
51712+ char *tmp;
51713+
51714+ while (userp) {
51715+ if ((o_tmp = (struct acl_object_label *)
51716+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51717+ return -ENOMEM;
51718+
51719+ if (copy_from_user(o_tmp, userp,
51720+ sizeof (struct acl_object_label)))
51721+ return -EFAULT;
51722+
51723+ userp = o_tmp->prev;
51724+
51725+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51726+
51727+ if (!len || len >= PATH_MAX)
51728+ return -EINVAL;
51729+
51730+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51731+ return -ENOMEM;
51732+
51733+ if (copy_from_user(tmp, o_tmp->filename, len))
51734+ return -EFAULT;
51735+ tmp[len-1] = '\0';
51736+ o_tmp->filename = tmp;
51737+
51738+ insert_acl_obj_label(o_tmp, subj);
51739+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51740+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51741+ return -ENOMEM;
51742+
51743+ ret = copy_user_glob(o_tmp);
51744+ if (ret)
51745+ return ret;
51746+
51747+ if (o_tmp->nested) {
51748+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51749+ if (IS_ERR(o_tmp->nested))
51750+ return PTR_ERR(o_tmp->nested);
51751+
51752+ /* insert into nested subject list */
51753+ o_tmp->nested->next = role->hash->first;
51754+ role->hash->first = o_tmp->nested;
51755+ }
51756+ }
51757+
51758+ return 0;
51759+}
51760+
51761+static __u32
51762+count_user_subjs(struct acl_subject_label *userp)
51763+{
51764+ struct acl_subject_label s_tmp;
51765+ __u32 num = 0;
51766+
51767+ while (userp) {
51768+ if (copy_from_user(&s_tmp, userp,
51769+ sizeof (struct acl_subject_label)))
51770+ break;
51771+
51772+ userp = s_tmp.prev;
51773+ /* do not count nested subjects against this count, since
51774+ they are not included in the hash table, but are
51775+ attached to objects. We have already counted
51776+ the subjects in userspace for the allocation
51777+ stack
51778+ */
51779+ if (!(s_tmp.mode & GR_NESTED))
51780+ num++;
51781+ }
51782+
51783+ return num;
51784+}
51785+
51786+static int
51787+copy_user_allowedips(struct acl_role_label *rolep)
51788+{
51789+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51790+
51791+ ruserip = rolep->allowed_ips;
51792+
51793+ while (ruserip) {
51794+ rlast = rtmp;
51795+
51796+ if ((rtmp = (struct role_allowed_ip *)
51797+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51798+ return -ENOMEM;
51799+
51800+ if (copy_from_user(rtmp, ruserip,
51801+ sizeof (struct role_allowed_ip)))
51802+ return -EFAULT;
51803+
51804+ ruserip = rtmp->prev;
51805+
51806+ if (!rlast) {
51807+ rtmp->prev = NULL;
51808+ rolep->allowed_ips = rtmp;
51809+ } else {
51810+ rlast->next = rtmp;
51811+ rtmp->prev = rlast;
51812+ }
51813+
51814+ if (!ruserip)
51815+ rtmp->next = NULL;
51816+ }
51817+
51818+ return 0;
51819+}
51820+
51821+static int
51822+copy_user_transitions(struct acl_role_label *rolep)
51823+{
51824+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51825+
51826+ unsigned int len;
51827+ char *tmp;
51828+
51829+ rusertp = rolep->transitions;
51830+
51831+ while (rusertp) {
51832+ rlast = rtmp;
51833+
51834+ if ((rtmp = (struct role_transition *)
51835+ acl_alloc(sizeof (struct role_transition))) == NULL)
51836+ return -ENOMEM;
51837+
51838+ if (copy_from_user(rtmp, rusertp,
51839+ sizeof (struct role_transition)))
51840+ return -EFAULT;
51841+
51842+ rusertp = rtmp->prev;
51843+
51844+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51845+
51846+ if (!len || len >= GR_SPROLE_LEN)
51847+ return -EINVAL;
51848+
51849+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51850+ return -ENOMEM;
51851+
51852+ if (copy_from_user(tmp, rtmp->rolename, len))
51853+ return -EFAULT;
51854+ tmp[len-1] = '\0';
51855+ rtmp->rolename = tmp;
51856+
51857+ if (!rlast) {
51858+ rtmp->prev = NULL;
51859+ rolep->transitions = rtmp;
51860+ } else {
51861+ rlast->next = rtmp;
51862+ rtmp->prev = rlast;
51863+ }
51864+
51865+ if (!rusertp)
51866+ rtmp->next = NULL;
51867+ }
51868+
51869+ return 0;
51870+}
51871+
51872+static struct acl_subject_label *
51873+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51874+{
51875+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51876+ unsigned int len;
51877+ char *tmp;
51878+ __u32 num_objs;
51879+ struct acl_ip_label **i_tmp, *i_utmp2;
51880+ struct gr_hash_struct ghash;
51881+ struct subject_map *subjmap;
51882+ unsigned int i_num;
51883+ int err;
51884+
51885+ s_tmp = lookup_subject_map(userp);
51886+
51887+ /* we've already copied this subject into the kernel, just return
51888+ the reference to it, and don't copy it over again
51889+ */
51890+ if (s_tmp)
51891+ return(s_tmp);
51892+
51893+ if ((s_tmp = (struct acl_subject_label *)
51894+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51895+ return ERR_PTR(-ENOMEM);
51896+
51897+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51898+ if (subjmap == NULL)
51899+ return ERR_PTR(-ENOMEM);
51900+
51901+ subjmap->user = userp;
51902+ subjmap->kernel = s_tmp;
51903+ insert_subj_map_entry(subjmap);
51904+
51905+ if (copy_from_user(s_tmp, userp,
51906+ sizeof (struct acl_subject_label)))
51907+ return ERR_PTR(-EFAULT);
51908+
51909+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51910+
51911+ if (!len || len >= PATH_MAX)
51912+ return ERR_PTR(-EINVAL);
51913+
51914+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51915+ return ERR_PTR(-ENOMEM);
51916+
51917+ if (copy_from_user(tmp, s_tmp->filename, len))
51918+ return ERR_PTR(-EFAULT);
51919+ tmp[len-1] = '\0';
51920+ s_tmp->filename = tmp;
51921+
51922+ if (!strcmp(s_tmp->filename, "/"))
51923+ role->root_label = s_tmp;
51924+
51925+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51926+ return ERR_PTR(-EFAULT);
51927+
51928+ /* copy user and group transition tables */
51929+
51930+ if (s_tmp->user_trans_num) {
51931+ uid_t *uidlist;
51932+
51933+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51934+ if (uidlist == NULL)
51935+ return ERR_PTR(-ENOMEM);
51936+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51937+ return ERR_PTR(-EFAULT);
51938+
51939+ s_tmp->user_transitions = uidlist;
51940+ }
51941+
51942+ if (s_tmp->group_trans_num) {
51943+ gid_t *gidlist;
51944+
51945+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51946+ if (gidlist == NULL)
51947+ return ERR_PTR(-ENOMEM);
51948+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51949+ return ERR_PTR(-EFAULT);
51950+
51951+ s_tmp->group_transitions = gidlist;
51952+ }
51953+
51954+ /* set up object hash table */
51955+ num_objs = count_user_objs(ghash.first);
51956+
51957+ s_tmp->obj_hash_size = num_objs;
51958+ s_tmp->obj_hash =
51959+ (struct acl_object_label **)
51960+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51961+
51962+ if (!s_tmp->obj_hash)
51963+ return ERR_PTR(-ENOMEM);
51964+
51965+ memset(s_tmp->obj_hash, 0,
51966+ s_tmp->obj_hash_size *
51967+ sizeof (struct acl_object_label *));
51968+
51969+ /* add in objects */
51970+ err = copy_user_objs(ghash.first, s_tmp, role);
51971+
51972+ if (err)
51973+ return ERR_PTR(err);
51974+
51975+ /* set pointer for parent subject */
51976+ if (s_tmp->parent_subject) {
51977+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51978+
51979+ if (IS_ERR(s_tmp2))
51980+ return s_tmp2;
51981+
51982+ s_tmp->parent_subject = s_tmp2;
51983+ }
51984+
51985+ /* add in ip acls */
51986+
51987+ if (!s_tmp->ip_num) {
51988+ s_tmp->ips = NULL;
51989+ goto insert;
51990+ }
51991+
51992+ i_tmp =
51993+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51994+ sizeof (struct acl_ip_label *));
51995+
51996+ if (!i_tmp)
51997+ return ERR_PTR(-ENOMEM);
51998+
51999+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52000+ *(i_tmp + i_num) =
52001+ (struct acl_ip_label *)
52002+ acl_alloc(sizeof (struct acl_ip_label));
52003+ if (!*(i_tmp + i_num))
52004+ return ERR_PTR(-ENOMEM);
52005+
52006+ if (copy_from_user
52007+ (&i_utmp2, s_tmp->ips + i_num,
52008+ sizeof (struct acl_ip_label *)))
52009+ return ERR_PTR(-EFAULT);
52010+
52011+ if (copy_from_user
52012+ (*(i_tmp + i_num), i_utmp2,
52013+ sizeof (struct acl_ip_label)))
52014+ return ERR_PTR(-EFAULT);
52015+
52016+ if ((*(i_tmp + i_num))->iface == NULL)
52017+ continue;
52018+
52019+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52020+ if (!len || len >= IFNAMSIZ)
52021+ return ERR_PTR(-EINVAL);
52022+ tmp = acl_alloc(len);
52023+ if (tmp == NULL)
52024+ return ERR_PTR(-ENOMEM);
52025+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52026+ return ERR_PTR(-EFAULT);
52027+ (*(i_tmp + i_num))->iface = tmp;
52028+ }
52029+
52030+ s_tmp->ips = i_tmp;
52031+
52032+insert:
52033+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52034+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52035+ return ERR_PTR(-ENOMEM);
52036+
52037+ return s_tmp;
52038+}
52039+
52040+static int
52041+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52042+{
52043+ struct acl_subject_label s_pre;
52044+ struct acl_subject_label * ret;
52045+ int err;
52046+
52047+ while (userp) {
52048+ if (copy_from_user(&s_pre, userp,
52049+ sizeof (struct acl_subject_label)))
52050+ return -EFAULT;
52051+
52052+ /* do not add nested subjects here, add
52053+ while parsing objects
52054+ */
52055+
52056+ if (s_pre.mode & GR_NESTED) {
52057+ userp = s_pre.prev;
52058+ continue;
52059+ }
52060+
52061+ ret = do_copy_user_subj(userp, role);
52062+
52063+ err = PTR_ERR(ret);
52064+ if (IS_ERR(ret))
52065+ return err;
52066+
52067+ insert_acl_subj_label(ret, role);
52068+
52069+ userp = s_pre.prev;
52070+ }
52071+
52072+ return 0;
52073+}
52074+
52075+static int
52076+copy_user_acl(struct gr_arg *arg)
52077+{
52078+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52079+ struct sprole_pw *sptmp;
52080+ struct gr_hash_struct *ghash;
52081+ uid_t *domainlist;
52082+ unsigned int r_num;
52083+ unsigned int len;
52084+ char *tmp;
52085+ int err = 0;
52086+ __u16 i;
52087+ __u32 num_subjs;
52088+
52089+ /* we need a default and kernel role */
52090+ if (arg->role_db.num_roles < 2)
52091+ return -EINVAL;
52092+
52093+ /* copy special role authentication info from userspace */
52094+
52095+ num_sprole_pws = arg->num_sprole_pws;
52096+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52097+
52098+ if (!acl_special_roles) {
52099+ err = -ENOMEM;
52100+ goto cleanup;
52101+ }
52102+
52103+ for (i = 0; i < num_sprole_pws; i++) {
52104+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52105+ if (!sptmp) {
52106+ err = -ENOMEM;
52107+ goto cleanup;
52108+ }
52109+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52110+ sizeof (struct sprole_pw))) {
52111+ err = -EFAULT;
52112+ goto cleanup;
52113+ }
52114+
52115+ len =
52116+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52117+
52118+ if (!len || len >= GR_SPROLE_LEN) {
52119+ err = -EINVAL;
52120+ goto cleanup;
52121+ }
52122+
52123+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52124+ err = -ENOMEM;
52125+ goto cleanup;
52126+ }
52127+
52128+ if (copy_from_user(tmp, sptmp->rolename, len)) {
52129+ err = -EFAULT;
52130+ goto cleanup;
52131+ }
52132+ tmp[len-1] = '\0';
52133+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52134+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52135+#endif
52136+ sptmp->rolename = tmp;
52137+ acl_special_roles[i] = sptmp;
52138+ }
52139+
52140+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52141+
52142+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52143+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52144+
52145+ if (!r_tmp) {
52146+ err = -ENOMEM;
52147+ goto cleanup;
52148+ }
52149+
52150+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52151+ sizeof (struct acl_role_label *))) {
52152+ err = -EFAULT;
52153+ goto cleanup;
52154+ }
52155+
52156+ if (copy_from_user(r_tmp, r_utmp2,
52157+ sizeof (struct acl_role_label))) {
52158+ err = -EFAULT;
52159+ goto cleanup;
52160+ }
52161+
52162+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52163+
52164+ if (!len || len >= PATH_MAX) {
52165+ err = -EINVAL;
52166+ goto cleanup;
52167+ }
52168+
52169+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52170+ err = -ENOMEM;
52171+ goto cleanup;
52172+ }
52173+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
52174+ err = -EFAULT;
52175+ goto cleanup;
52176+ }
52177+ tmp[len-1] = '\0';
52178+ r_tmp->rolename = tmp;
52179+
52180+ if (!strcmp(r_tmp->rolename, "default")
52181+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52182+ default_role = r_tmp;
52183+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52184+ kernel_role = r_tmp;
52185+ }
52186+
52187+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52188+ err = -ENOMEM;
52189+ goto cleanup;
52190+ }
52191+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52192+ err = -EFAULT;
52193+ goto cleanup;
52194+ }
52195+
52196+ r_tmp->hash = ghash;
52197+
52198+ num_subjs = count_user_subjs(r_tmp->hash->first);
52199+
52200+ r_tmp->subj_hash_size = num_subjs;
52201+ r_tmp->subj_hash =
52202+ (struct acl_subject_label **)
52203+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52204+
52205+ if (!r_tmp->subj_hash) {
52206+ err = -ENOMEM;
52207+ goto cleanup;
52208+ }
52209+
52210+ err = copy_user_allowedips(r_tmp);
52211+ if (err)
52212+ goto cleanup;
52213+
52214+ /* copy domain info */
52215+ if (r_tmp->domain_children != NULL) {
52216+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52217+ if (domainlist == NULL) {
52218+ err = -ENOMEM;
52219+ goto cleanup;
52220+ }
52221+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
52222+ err = -EFAULT;
52223+ goto cleanup;
52224+ }
52225+ r_tmp->domain_children = domainlist;
52226+ }
52227+
52228+ err = copy_user_transitions(r_tmp);
52229+ if (err)
52230+ goto cleanup;
52231+
52232+ memset(r_tmp->subj_hash, 0,
52233+ r_tmp->subj_hash_size *
52234+ sizeof (struct acl_subject_label *));
52235+
52236+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52237+
52238+ if (err)
52239+ goto cleanup;
52240+
52241+ /* set nested subject list to null */
52242+ r_tmp->hash->first = NULL;
52243+
52244+ insert_acl_role_label(r_tmp);
52245+ }
52246+
52247+ goto return_err;
52248+ cleanup:
52249+ free_variables();
52250+ return_err:
52251+ return err;
52252+
52253+}
52254+
52255+static int
52256+gracl_init(struct gr_arg *args)
52257+{
52258+ int error = 0;
52259+
52260+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52261+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52262+
52263+ if (init_variables(args)) {
52264+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52265+ error = -ENOMEM;
52266+ free_variables();
52267+ goto out;
52268+ }
52269+
52270+ error = copy_user_acl(args);
52271+ free_init_variables();
52272+ if (error) {
52273+ free_variables();
52274+ goto out;
52275+ }
52276+
52277+ if ((error = gr_set_acls(0))) {
52278+ free_variables();
52279+ goto out;
52280+ }
52281+
52282+ pax_open_kernel();
52283+ gr_status |= GR_READY;
52284+ pax_close_kernel();
52285+
52286+ out:
52287+ return error;
52288+}
52289+
52290+/* derived from glibc fnmatch() 0: match, 1: no match*/
52291+
52292+static int
52293+glob_match(const char *p, const char *n)
52294+{
52295+ char c;
52296+
52297+ while ((c = *p++) != '\0') {
52298+ switch (c) {
52299+ case '?':
52300+ if (*n == '\0')
52301+ return 1;
52302+ else if (*n == '/')
52303+ return 1;
52304+ break;
52305+ case '\\':
52306+ if (*n != c)
52307+ return 1;
52308+ break;
52309+ case '*':
52310+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
52311+ if (*n == '/')
52312+ return 1;
52313+ else if (c == '?') {
52314+ if (*n == '\0')
52315+ return 1;
52316+ else
52317+ ++n;
52318+ }
52319+ }
52320+ if (c == '\0') {
52321+ return 0;
52322+ } else {
52323+ const char *endp;
52324+
52325+ if ((endp = strchr(n, '/')) == NULL)
52326+ endp = n + strlen(n);
52327+
52328+ if (c == '[') {
52329+ for (--p; n < endp; ++n)
52330+ if (!glob_match(p, n))
52331+ return 0;
52332+ } else if (c == '/') {
52333+ while (*n != '\0' && *n != '/')
52334+ ++n;
52335+ if (*n == '/' && !glob_match(p, n + 1))
52336+ return 0;
52337+ } else {
52338+ for (--p; n < endp; ++n)
52339+ if (*n == c && !glob_match(p, n))
52340+ return 0;
52341+ }
52342+
52343+ return 1;
52344+ }
52345+ case '[':
52346+ {
52347+ int not;
52348+ char cold;
52349+
52350+ if (*n == '\0' || *n == '/')
52351+ return 1;
52352+
52353+ not = (*p == '!' || *p == '^');
52354+ if (not)
52355+ ++p;
52356+
52357+ c = *p++;
52358+ for (;;) {
52359+ unsigned char fn = (unsigned char)*n;
52360+
52361+ if (c == '\0')
52362+ return 1;
52363+ else {
52364+ if (c == fn)
52365+ goto matched;
52366+ cold = c;
52367+ c = *p++;
52368+
52369+ if (c == '-' && *p != ']') {
52370+ unsigned char cend = *p++;
52371+
52372+ if (cend == '\0')
52373+ return 1;
52374+
52375+ if (cold <= fn && fn <= cend)
52376+ goto matched;
52377+
52378+ c = *p++;
52379+ }
52380+ }
52381+
52382+ if (c == ']')
52383+ break;
52384+ }
52385+ if (!not)
52386+ return 1;
52387+ break;
52388+ matched:
52389+ while (c != ']') {
52390+ if (c == '\0')
52391+ return 1;
52392+
52393+ c = *p++;
52394+ }
52395+ if (not)
52396+ return 1;
52397+ }
52398+ break;
52399+ default:
52400+ if (c != *n)
52401+ return 1;
52402+ }
52403+
52404+ ++n;
52405+ }
52406+
52407+ if (*n == '\0')
52408+ return 0;
52409+
52410+ if (*n == '/')
52411+ return 0;
52412+
52413+ return 1;
52414+}
52415+
52416+static struct acl_object_label *
52417+chk_glob_label(struct acl_object_label *globbed,
52418+ struct dentry *dentry, struct vfsmount *mnt, char **path)
52419+{
52420+ struct acl_object_label *tmp;
52421+
52422+ if (*path == NULL)
52423+ *path = gr_to_filename_nolock(dentry, mnt);
52424+
52425+ tmp = globbed;
52426+
52427+ while (tmp) {
52428+ if (!glob_match(tmp->filename, *path))
52429+ return tmp;
52430+ tmp = tmp->next;
52431+ }
52432+
52433+ return NULL;
52434+}
52435+
52436+static struct acl_object_label *
52437+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52438+ const ino_t curr_ino, const dev_t curr_dev,
52439+ const struct acl_subject_label *subj, char **path, const int checkglob)
52440+{
52441+ struct acl_subject_label *tmpsubj;
52442+ struct acl_object_label *retval;
52443+ struct acl_object_label *retval2;
52444+
52445+ tmpsubj = (struct acl_subject_label *) subj;
52446+ read_lock(&gr_inode_lock);
52447+ do {
52448+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52449+ if (retval) {
52450+ if (checkglob && retval->globbed) {
52451+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
52452+ (struct vfsmount *)orig_mnt, path);
52453+ if (retval2)
52454+ retval = retval2;
52455+ }
52456+ break;
52457+ }
52458+ } while ((tmpsubj = tmpsubj->parent_subject));
52459+ read_unlock(&gr_inode_lock);
52460+
52461+ return retval;
52462+}
52463+
52464+static __inline__ struct acl_object_label *
52465+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52466+ const struct dentry *curr_dentry,
52467+ const struct acl_subject_label *subj, char **path, const int checkglob)
52468+{
52469+ int newglob = checkglob;
52470+
52471+ /* if we aren't yet checking a subdirectory of the original path, skip glob checking,
52472+ as we don't want a "/ *" rule to match instead of the "/" object itself;
52473+ don't do this for create lookups that call this function, though, since they look up
52474+ on the parent and therefore need glob checks on all paths
52475+ */
52476+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52477+ newglob = GR_NO_GLOB;
52478+
52479+ return __full_lookup(orig_dentry, orig_mnt,
52480+ curr_dentry->d_inode->i_ino,
52481+ __get_dev(curr_dentry), subj, path, newglob);
52482+}
52483+
52484+static struct acl_object_label *
52485+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52486+ const struct acl_subject_label *subj, char *path, const int checkglob)
52487+{
52488+ struct dentry *dentry = (struct dentry *) l_dentry;
52489+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52490+ struct acl_object_label *retval;
52491+
52492+ spin_lock(&dcache_lock);
52493+ spin_lock(&vfsmount_lock);
52494+
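+ /* objects on kernel-internal mounts (deleted anonymous shared memory,
+    pipes, sockets, deleted hugetlbfs files) and fs-private inodes have no
+    usable pathname for policy lookup, so hand back a fake object label:
+    read/write, plus exec for subjects carrying GR_SHMEXEC */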
52495+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52496+#ifdef CONFIG_NET
52497+ mnt == sock_mnt ||
52498+#endif
52499+#ifdef CONFIG_HUGETLBFS
52500+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52501+#endif
52502+ /* ignore Eric Biederman */
52503+ IS_PRIVATE(l_dentry->d_inode))) {
52504+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52505+ goto out;
52506+ }
52507+
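+ /* walk from the target dentry up toward the namespace root, hopping
+    across mount points via mnt_mountpoint, and return the first object
+    label the subject (or one of its parent subjects) defines along the
+    way; fall back to the object label of the real root if nothing else
+    matches */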
52508+ for (;;) {
52509+ if (dentry == real_root && mnt == real_root_mnt)
52510+ break;
52511+
52512+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52513+ if (mnt->mnt_parent == mnt)
52514+ break;
52515+
52516+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52517+ if (retval != NULL)
52518+ goto out;
52519+
52520+ dentry = mnt->mnt_mountpoint;
52521+ mnt = mnt->mnt_parent;
52522+ continue;
52523+ }
52524+
52525+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52526+ if (retval != NULL)
52527+ goto out;
52528+
52529+ dentry = dentry->d_parent;
52530+ }
52531+
52532+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52533+
52534+ if (retval == NULL)
52535+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
52536+out:
52537+ spin_unlock(&vfsmount_lock);
52538+ spin_unlock(&dcache_lock);
52539+
52540+ BUG_ON(retval == NULL);
52541+
52542+ return retval;
52543+}
52544+
52545+static __inline__ struct acl_object_label *
52546+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52547+ const struct acl_subject_label *subj)
52548+{
52549+ char *path = NULL;
52550+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52551+}
52552+
52553+static __inline__ struct acl_object_label *
52554+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52555+ const struct acl_subject_label *subj)
52556+{
52557+ char *path = NULL;
52558+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52559+}
52560+
52561+static __inline__ struct acl_object_label *
52562+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52563+ const struct acl_subject_label *subj, char *path)
52564+{
52565+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52566+}
52567+
52568+static struct acl_subject_label *
52569+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52570+ const struct acl_role_label *role)
52571+{
52572+ struct dentry *dentry = (struct dentry *) l_dentry;
52573+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52574+ struct acl_subject_label *retval;
52575+
52576+ spin_lock(&dcache_lock);
52577+ spin_lock(&vfsmount_lock);
52578+
52579+ for (;;) {
52580+ if (dentry == real_root && mnt == real_root_mnt)
52581+ break;
52582+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52583+ if (mnt->mnt_parent == mnt)
52584+ break;
52585+
52586+ read_lock(&gr_inode_lock);
52587+ retval =
52588+ lookup_acl_subj_label(dentry->d_inode->i_ino,
52589+ __get_dev(dentry), role);
52590+ read_unlock(&gr_inode_lock);
52591+ if (retval != NULL)
52592+ goto out;
52593+
52594+ dentry = mnt->mnt_mountpoint;
52595+ mnt = mnt->mnt_parent;
52596+ continue;
52597+ }
52598+
52599+ read_lock(&gr_inode_lock);
52600+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52601+ __get_dev(dentry), role);
52602+ read_unlock(&gr_inode_lock);
52603+ if (retval != NULL)
52604+ goto out;
52605+
52606+ dentry = dentry->d_parent;
52607+ }
52608+
52609+ read_lock(&gr_inode_lock);
52610+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52611+ __get_dev(dentry), role);
52612+ read_unlock(&gr_inode_lock);
52613+
52614+ if (unlikely(retval == NULL)) {
52615+ read_lock(&gr_inode_lock);
52616+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
52617+ __get_dev(real_root), role);
52618+ read_unlock(&gr_inode_lock);
52619+ }
52620+out:
52621+ spin_unlock(&vfsmount_lock);
52622+ spin_unlock(&dcache_lock);
52623+
52624+ BUG_ON(retval == NULL);
52625+
52626+ return retval;
52627+}
52628+
52629+static void
52630+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52631+{
52632+ struct task_struct *task = current;
52633+ const struct cred *cred = current_cred();
52634+
52635+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52636+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52637+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52638+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52639+
52640+ return;
52641+}
52642+
52643+static void
52644+gr_log_learn_sysctl(const char *path, const __u32 mode)
52645+{
52646+ struct task_struct *task = current;
52647+ const struct cred *cred = current_cred();
52648+
52649+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52650+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52651+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52652+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52653+
52654+ return;
52655+}
52656+
52657+static void
52658+gr_log_learn_id_change(const char type, const unsigned int real,
52659+ const unsigned int effective, const unsigned int fs)
52660+{
52661+ struct task_struct *task = current;
52662+ const struct cred *cred = current_cred();
52663+
52664+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52665+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52666+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52667+ type, real, effective, fs, &task->signal->saved_ip);
52668+
52669+ return;
52670+}
52671+
52672+__u32
52673+gr_search_file(const struct dentry * dentry, const __u32 mode,
52674+ const struct vfsmount * mnt)
52675+{
52676+ __u32 retval = mode;
52677+ struct acl_subject_label *curracl;
52678+ struct acl_object_label *currobj;
52679+
52680+ if (unlikely(!(gr_status & GR_READY)))
52681+ return (mode & ~GR_AUDITS);
52682+
52683+ curracl = current->acl;
52684+
52685+ currobj = chk_obj_label(dentry, mnt, curracl);
52686+ retval = currobj->mode & mode;
52687+
52688+ /* if we're opening a specified transfer file for writing
52689+ (e.g. /dev/initctl), then transfer our role to init
52690+ */
52691+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52692+ current->role->roletype & GR_ROLE_PERSIST)) {
52693+ struct task_struct *task = init_pid_ns.child_reaper;
52694+
52695+ if (task->role != current->role) {
52696+ task->acl_sp_role = 0;
52697+ task->acl_role_id = current->acl_role_id;
52698+ task->role = current->role;
52699+ rcu_read_lock();
52700+ read_lock(&grsec_exec_file_lock);
52701+ gr_apply_subject_to_task(task);
52702+ read_unlock(&grsec_exec_file_lock);
52703+ rcu_read_unlock();
52704+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52705+ }
52706+ }
52707+
52708+ if (unlikely
52709+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52710+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52711+ __u32 new_mode = mode;
52712+
52713+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52714+
52715+ retval = new_mode;
52716+
52717+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52718+ new_mode |= GR_INHERIT;
52719+
52720+ if (!(mode & GR_NOLEARN))
52721+ gr_log_learn(dentry, mnt, new_mode);
52722+ }
52723+
52724+ return retval;
52725+}
52726+
52727+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52728+ const struct dentry *parent,
52729+ const struct vfsmount *mnt)
52730+{
52731+ struct name_entry *match;
52732+ struct acl_object_label *matchpo;
52733+ struct acl_subject_label *curracl;
52734+ char *path;
52735+
52736+ if (unlikely(!(gr_status & GR_READY)))
52737+ return NULL;
52738+
52739+ preempt_disable();
52740+ path = gr_to_filename_rbac(new_dentry, mnt);
52741+ match = lookup_name_entry_create(path);
52742+
52743+ curracl = current->acl;
52744+
52745+ if (match) {
52746+ read_lock(&gr_inode_lock);
52747+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52748+ read_unlock(&gr_inode_lock);
52749+
52750+ if (matchpo) {
52751+ preempt_enable();
52752+ return matchpo;
52753+ }
52754+ }
52755+
52756+ // lookup parent
52757+
52758+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52759+
52760+ preempt_enable();
52761+ return matchpo;
52762+}
52763+
52764+__u32
52765+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52766+ const struct vfsmount * mnt, const __u32 mode)
52767+{
52768+ struct acl_object_label *matchpo;
52769+ __u32 retval;
52770+
52771+ if (unlikely(!(gr_status & GR_READY)))
52772+ return (mode & ~GR_AUDITS);
52773+
52774+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52775+
52776+ retval = matchpo->mode & mode;
52777+
52778+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52779+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52780+ __u32 new_mode = mode;
52781+
52782+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52783+
52784+ gr_log_learn(new_dentry, mnt, new_mode);
52785+ return new_mode;
52786+ }
52787+
52788+ return retval;
52789+}
52790+
52791+__u32
52792+gr_check_link(const struct dentry * new_dentry,
52793+ const struct dentry * parent_dentry,
52794+ const struct vfsmount * parent_mnt,
52795+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52796+{
52797+ struct acl_object_label *obj;
52798+ __u32 oldmode, newmode;
52799+ __u32 needmode;
52800+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52801+ GR_DELETE | GR_INHERIT;
52802+
52803+ if (unlikely(!(gr_status & GR_READY)))
52804+ return (GR_CREATE | GR_LINK);
52805+
52806+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52807+ oldmode = obj->mode;
52808+
52809+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52810+ newmode = obj->mode;
52811+
52812+ needmode = newmode & checkmodes;
52813+
52814+ // old name for hardlink must have at least the permissions of the new name
52815+ if ((oldmode & needmode) != needmode)
52816+ goto bad;
52817+
52818+ // if old name had restrictions/auditing, make sure the new name does as well
52819+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52820+
52821+ // don't allow hardlinking of suid/sgid files without permission
52822+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52823+ needmode |= GR_SETID;
52824+
52825+ if ((newmode & needmode) != needmode)
52826+ goto bad;
52827+
52828+ // enforce minimum permissions
52829+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52830+ return newmode;
52831+bad:
52832+ needmode = oldmode;
52833+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52834+ needmode |= GR_SETID;
52835+
52836+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52837+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52838+ return (GR_CREATE | GR_LINK);
52839+ } else if (newmode & GR_SUPPRESS)
52840+ return GR_SUPPRESS;
52841+ else
52842+ return 0;
52843+}
52844+
52845+int
52846+gr_check_hidden_task(const struct task_struct *task)
52847+{
52848+ if (unlikely(!(gr_status & GR_READY)))
52849+ return 0;
52850+
52851+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52852+ return 1;
52853+
52854+ return 0;
52855+}
52856+
52857+int
52858+gr_check_protected_task(const struct task_struct *task)
52859+{
52860+ if (unlikely(!(gr_status & GR_READY) || !task))
52861+ return 0;
52862+
52863+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52864+ task->acl != current->acl)
52865+ return 1;
52866+
52867+ return 0;
52868+}
52869+
52870+int
52871+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52872+{
52873+ struct task_struct *p;
52874+ int ret = 0;
52875+
52876+ if (unlikely(!(gr_status & GR_READY) || !pid))
52877+ return ret;
52878+
52879+ read_lock(&tasklist_lock);
52880+ do_each_pid_task(pid, type, p) {
52881+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52882+ p->acl != current->acl) {
52883+ ret = 1;
52884+ goto out;
52885+ }
52886+ } while_each_pid_task(pid, type, p);
52887+out:
52888+ read_unlock(&tasklist_lock);
52889+
52890+ return ret;
52891+}
52892+
52893+void
52894+gr_copy_label(struct task_struct *tsk)
52895+{
52896+ tsk->signal->used_accept = 0;
52897+ tsk->acl_sp_role = 0;
52898+ tsk->acl_role_id = current->acl_role_id;
52899+ tsk->acl = current->acl;
52900+ tsk->role = current->role;
52901+ tsk->signal->curr_ip = current->signal->curr_ip;
52902+ tsk->signal->saved_ip = current->signal->saved_ip;
52903+ if (current->exec_file)
52904+ get_file(current->exec_file);
52905+ tsk->exec_file = current->exec_file;
52906+ tsk->is_writable = current->is_writable;
52907+ if (unlikely(current->signal->used_accept)) {
52908+ current->signal->curr_ip = 0;
52909+ current->signal->saved_ip = 0;
52910+ }
52911+
52912+ return;
52913+}
52914+
52915+static void
52916+gr_set_proc_res(struct task_struct *task)
52917+{
52918+ struct acl_subject_label *proc;
52919+ unsigned short i;
52920+
52921+ proc = task->acl;
52922+
52923+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52924+ return;
52925+
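+ /* resmask is a bitmap with one bit per RLIMIT_* index; apply only the
+    limits the subject explicitly configures */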
52926+ for (i = 0; i < RLIM_NLIMITS; i++) {
52927+ if (!(proc->resmask & (1 << i)))
52928+ continue;
52929+
52930+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52931+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52932+ }
52933+
52934+ return;
52935+}
52936+
52937+extern int __gr_process_user_ban(struct user_struct *user);
52938+
52939+int
52940+gr_check_user_change(int real, int effective, int fs)
52941+{
52942+ unsigned int i;
52943+ __u16 num;
52944+ uid_t *uidlist;
52945+ int curuid;
52946+ int realok = 0;
52947+ int effectiveok = 0;
52948+ int fsok = 0;
52949+
52950+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52951+ struct user_struct *user;
52952+
52953+ if (real == -1)
52954+ goto skipit;
52955+
52956+ user = find_user(real);
52957+ if (user == NULL)
52958+ goto skipit;
52959+
52960+ if (__gr_process_user_ban(user)) {
52961+ /* for find_user */
52962+ free_uid(user);
52963+ return 1;
52964+ }
52965+
52966+ /* for find_user */
52967+ free_uid(user);
52968+
52969+skipit:
52970+#endif
52971+
52972+ if (unlikely(!(gr_status & GR_READY)))
52973+ return 0;
52974+
52975+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52976+ gr_log_learn_id_change('u', real, effective, fs);
52977+
52978+ num = current->acl->user_trans_num;
52979+ uidlist = current->acl->user_transitions;
52980+
52981+ if (uidlist == NULL)
52982+ return 0;
52983+
52984+ if (real == -1)
52985+ realok = 1;
52986+ if (effective == -1)
52987+ effectiveok = 1;
52988+ if (fs == -1)
52989+ fsok = 1;
52990+
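+ /* GR_ID_ALLOW lists the only uids this subject may transition to, while
+    GR_ID_DENY lists forbidden uids and implicitly permits all others */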
52991+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
52992+ for (i = 0; i < num; i++) {
52993+ curuid = (int)uidlist[i];
52994+ if (real == curuid)
52995+ realok = 1;
52996+ if (effective == curuid)
52997+ effectiveok = 1;
52998+ if (fs == curuid)
52999+ fsok = 1;
53000+ }
53001+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53002+ for (i = 0; i < num; i++) {
53003+ curuid = (int)uidlist[i];
53004+ if (real == curuid)
53005+ break;
53006+ if (effective == curuid)
53007+ break;
53008+ if (fs == curuid)
53009+ break;
53010+ }
53011+ /* not in deny list */
53012+ if (i == num) {
53013+ realok = 1;
53014+ effectiveok = 1;
53015+ fsok = 1;
53016+ }
53017+ }
53018+
53019+ if (realok && effectiveok && fsok)
53020+ return 0;
53021+ else {
53022+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53023+ return 1;
53024+ }
53025+}
53026+
53027+int
53028+gr_check_group_change(int real, int effective, int fs)
53029+{
53030+ unsigned int i;
53031+ __u16 num;
53032+ gid_t *gidlist;
53033+ int curgid;
53034+ int realok = 0;
53035+ int effectiveok = 0;
53036+ int fsok = 0;
53037+
53038+ if (unlikely(!(gr_status & GR_READY)))
53039+ return 0;
53040+
53041+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53042+ gr_log_learn_id_change('g', real, effective, fs);
53043+
53044+ num = current->acl->group_trans_num;
53045+ gidlist = current->acl->group_transitions;
53046+
53047+ if (gidlist == NULL)
53048+ return 0;
53049+
53050+ if (real == -1)
53051+ realok = 1;
53052+ if (effective == -1)
53053+ effectiveok = 1;
53054+ if (fs == -1)
53055+ fsok = 1;
53056+
53057+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53058+ for (i = 0; i < num; i++) {
53059+ curgid = (int)gidlist[i];
53060+ if (real == curgid)
53061+ realok = 1;
53062+ if (effective == curgid)
53063+ effectiveok = 1;
53064+ if (fs == curgid)
53065+ fsok = 1;
53066+ }
53067+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53068+ for (i = 0; i < num; i++) {
53069+ curgid = (int)gidlist[i];
53070+ if (real == curgid)
53071+ break;
53072+ if (effective == curgid)
53073+ break;
53074+ if (fs == curgid)
53075+ break;
53076+ }
53077+ /* not in deny list */
53078+ if (i == num) {
53079+ realok = 1;
53080+ effectiveok = 1;
53081+ fsok = 1;
53082+ }
53083+ }
53084+
53085+ if (realok && effectiveok && fsok)
53086+ return 0;
53087+ else {
53088+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53089+ return 1;
53090+ }
53091+}
53092+
53093+void
53094+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53095+{
53096+ struct acl_role_label *role = task->role;
53097+ struct acl_subject_label *subj = NULL;
53098+ struct acl_object_label *obj;
53099+ struct file *filp;
53100+
53101+ if (unlikely(!(gr_status & GR_READY)))
53102+ return;
53103+
53104+ filp = task->exec_file;
53105+
53106+ /* kernel process, we'll give them the kernel role */
53107+ if (unlikely(!filp)) {
53108+ task->role = kernel_role;
53109+ task->acl = kernel_role->root_label;
53110+ return;
53111+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53112+ role = lookup_acl_role_label(task, uid, gid);
53113+
53114+ /* perform the subject lookup in the possibly new role;
53115+ we can reuse this result below in the case where role == task->role
53116+ */
53117+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53118+
53119+ /* if we changed uid/gid but ended up in the same role
53120+ and are using inheritance, don't lose the inherited subject:
53121+ when the current subject differs from what a normal lookup
53122+ would produce, we arrived at it via inheritance, so keep
53123+ that subject
53124+ */
53125+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53126+ (subj == task->acl)))
53127+ task->acl = subj;
53128+
53129+ task->role = role;
53130+
53131+ task->is_writable = 0;
53132+
53133+ /* ignore additional mmap checks for processes that are writable
53134+ by the default ACL */
53135+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53136+ if (unlikely(obj->mode & GR_WRITE))
53137+ task->is_writable = 1;
53138+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53139+ if (unlikely(obj->mode & GR_WRITE))
53140+ task->is_writable = 1;
53141+
53142+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53143+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53144+#endif
53145+
53146+ gr_set_proc_res(task);
53147+
53148+ return;
53149+}
53150+
53151+int
53152+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53153+ const int unsafe_share)
53154+{
53155+ struct task_struct *task = current;
53156+ struct acl_subject_label *newacl;
53157+ struct acl_object_label *obj;
53158+ __u32 retmode;
53159+
53160+ if (unlikely(!(gr_status & GR_READY)))
53161+ return 0;
53162+
53163+ newacl = chk_subj_label(dentry, mnt, task->role);
53164+
53165+ task_lock(task);
53166+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53167+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53168+ !(task->role->roletype & GR_ROLE_GOD) &&
53169+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53170+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
53171+ task_unlock(task);
53172+ if (unsafe_share)
53173+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53174+ else
53175+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53176+ return -EACCES;
53177+ }
53178+ task_unlock(task);
53179+
53180+ obj = chk_obj_label(dentry, mnt, task->acl);
53181+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53182+
53183+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53184+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53185+ if (obj->nested)
53186+ task->acl = obj->nested;
53187+ else
53188+ task->acl = newacl;
53189+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53190+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53191+
53192+ task->is_writable = 0;
53193+
53194+ /* ignore additional mmap checks for processes that are writable
53195+ by the default ACL */
53196+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53197+ if (unlikely(obj->mode & GR_WRITE))
53198+ task->is_writable = 1;
53199+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53200+ if (unlikely(obj->mode & GR_WRITE))
53201+ task->is_writable = 1;
53202+
53203+ gr_set_proc_res(task);
53204+
53205+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53206+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53207+#endif
53208+ return 0;
53209+}
53210+
53211+/* always called with valid inodev ptr */
53212+static void
53213+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53214+{
53215+ struct acl_object_label *matchpo;
53216+ struct acl_subject_label *matchps;
53217+ struct acl_subject_label *subj;
53218+ struct acl_role_label *role;
53219+ unsigned int x;
53220+
53221+ FOR_EACH_ROLE_START(role)
53222+ FOR_EACH_SUBJECT_START(role, subj, x)
53223+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53224+ matchpo->mode |= GR_DELETED;
53225+ FOR_EACH_SUBJECT_END(subj,x)
53226+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53227+ if (subj->inode == ino && subj->device == dev)
53228+ subj->mode |= GR_DELETED;
53229+ FOR_EACH_NESTED_SUBJECT_END(subj)
53230+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53231+ matchps->mode |= GR_DELETED;
53232+ FOR_EACH_ROLE_END(role)
53233+
53234+ inodev->nentry->deleted = 1;
53235+
53236+ return;
53237+}
53238+
53239+void
53240+gr_handle_delete(const ino_t ino, const dev_t dev)
53241+{
53242+ struct inodev_entry *inodev;
53243+
53244+ if (unlikely(!(gr_status & GR_READY)))
53245+ return;
53246+
53247+ write_lock(&gr_inode_lock);
53248+ inodev = lookup_inodev_entry(ino, dev);
53249+ if (inodev != NULL)
53250+ do_handle_delete(inodev, ino, dev);
53251+ write_unlock(&gr_inode_lock);
53252+
53253+ return;
53254+}
53255+
53256+static void
53257+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53258+ const ino_t newinode, const dev_t newdevice,
53259+ struct acl_subject_label *subj)
53260+{
53261+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53262+ struct acl_object_label *match;
53263+
53264+ match = subj->obj_hash[index];
53265+
53266+ while (match && (match->inode != oldinode ||
53267+ match->device != olddevice ||
53268+ !(match->mode & GR_DELETED)))
53269+ match = match->next;
53270+
53271+ if (match && (match->inode == oldinode)
53272+ && (match->device == olddevice)
53273+ && (match->mode & GR_DELETED)) {
53274+ if (match->prev == NULL) {
53275+ subj->obj_hash[index] = match->next;
53276+ if (match->next != NULL)
53277+ match->next->prev = NULL;
53278+ } else {
53279+ match->prev->next = match->next;
53280+ if (match->next != NULL)
53281+ match->next->prev = match->prev;
53282+ }
53283+ match->prev = NULL;
53284+ match->next = NULL;
53285+ match->inode = newinode;
53286+ match->device = newdevice;
53287+ match->mode &= ~GR_DELETED;
53288+
53289+ insert_acl_obj_label(match, subj);
53290+ }
53291+
53292+ return;
53293+}
53294+
53295+static void
53296+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53297+ const ino_t newinode, const dev_t newdevice,
53298+ struct acl_role_label *role)
53299+{
53300+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53301+ struct acl_subject_label *match;
53302+
53303+ match = role->subj_hash[index];
53304+
53305+ while (match && (match->inode != oldinode ||
53306+ match->device != olddevice ||
53307+ !(match->mode & GR_DELETED)))
53308+ match = match->next;
53309+
53310+ if (match && (match->inode == oldinode)
53311+ && (match->device == olddevice)
53312+ && (match->mode & GR_DELETED)) {
53313+ if (match->prev == NULL) {
53314+ role->subj_hash[index] = match->next;
53315+ if (match->next != NULL)
53316+ match->next->prev = NULL;
53317+ } else {
53318+ match->prev->next = match->next;
53319+ if (match->next != NULL)
53320+ match->next->prev = match->prev;
53321+ }
53322+ match->prev = NULL;
53323+ match->next = NULL;
53324+ match->inode = newinode;
53325+ match->device = newdevice;
53326+ match->mode &= ~GR_DELETED;
53327+
53328+ insert_acl_subj_label(match, role);
53329+ }
53330+
53331+ return;
53332+}
53333+
53334+static void
53335+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53336+ const ino_t newinode, const dev_t newdevice)
53337+{
53338+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53339+ struct inodev_entry *match;
53340+
53341+ match = inodev_set.i_hash[index];
53342+
53343+ while (match && (match->nentry->inode != oldinode ||
53344+ match->nentry->device != olddevice || !match->nentry->deleted))
53345+ match = match->next;
53346+
53347+ if (match && (match->nentry->inode == oldinode)
53348+ && (match->nentry->device == olddevice) &&
53349+ match->nentry->deleted) {
53350+ if (match->prev == NULL) {
53351+ inodev_set.i_hash[index] = match->next;
53352+ if (match->next != NULL)
53353+ match->next->prev = NULL;
53354+ } else {
53355+ match->prev->next = match->next;
53356+ if (match->next != NULL)
53357+ match->next->prev = match->prev;
53358+ }
53359+ match->prev = NULL;
53360+ match->next = NULL;
53361+ match->nentry->inode = newinode;
53362+ match->nentry->device = newdevice;
53363+ match->nentry->deleted = 0;
53364+
53365+ insert_inodev_entry(match);
53366+ }
53367+
53368+ return;
53369+}
53370+
53371+static void
53372+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
53373+{
53374+ struct acl_subject_label *subj;
53375+ struct acl_role_label *role;
53376+ unsigned int x;
53377+
53378+ FOR_EACH_ROLE_START(role)
53379+ update_acl_subj_label(matchn->inode, matchn->device,
53380+ inode, dev, role);
53381+
53382+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53383+ if ((subj->inode == inode) && (subj->device == dev)) {
53384+ subj->inode = inode;
53385+ subj->device = dev;
53386+ }
53387+ FOR_EACH_NESTED_SUBJECT_END(subj)
53388+ FOR_EACH_SUBJECT_START(role, subj, x)
53389+ update_acl_obj_label(matchn->inode, matchn->device,
53390+ inode, dev, subj);
53391+ FOR_EACH_SUBJECT_END(subj,x)
53392+ FOR_EACH_ROLE_END(role)
53393+
53394+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
53395+
53396+ return;
53397+}
53398+
53399+static void
53400+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53401+ const struct vfsmount *mnt)
53402+{
53403+ ino_t ino = dentry->d_inode->i_ino;
53404+ dev_t dev = __get_dev(dentry);
53405+
53406+ __do_handle_create(matchn, ino, dev);
53407+
53408+ return;
53409+}
53410+
53411+void
53412+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53413+{
53414+ struct name_entry *matchn;
53415+
53416+ if (unlikely(!(gr_status & GR_READY)))
53417+ return;
53418+
53419+ preempt_disable();
53420+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53421+
53422+ if (unlikely((unsigned long)matchn)) {
53423+ write_lock(&gr_inode_lock);
53424+ do_handle_create(matchn, dentry, mnt);
53425+ write_unlock(&gr_inode_lock);
53426+ }
53427+ preempt_enable();
53428+
53429+ return;
53430+}
53431+
53432+void
53433+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53434+{
53435+ struct name_entry *matchn;
53436+
53437+ if (unlikely(!(gr_status & GR_READY)))
53438+ return;
53439+
53440+ preempt_disable();
53441+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53442+
53443+ if (unlikely((unsigned long)matchn)) {
53444+ write_lock(&gr_inode_lock);
53445+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53446+ write_unlock(&gr_inode_lock);
53447+ }
53448+ preempt_enable();
53449+
53450+ return;
53451+}
53452+
53453+void
53454+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53455+ struct dentry *old_dentry,
53456+ struct dentry *new_dentry,
53457+ struct vfsmount *mnt, const __u8 replace)
53458+{
53459+ struct name_entry *matchn;
53460+ struct inodev_entry *inodev;
53461+ struct inode *inode = new_dentry->d_inode;
53462+ ino_t oldinode = old_dentry->d_inode->i_ino;
53463+ dev_t olddev = __get_dev(old_dentry);
53464+
53465+ /* vfs_rename swaps the name and parent link of old_dentry and
53466+ new_dentry:
53467+ at this point old_dentry has the new name, parent link, and inode
53468+ of the renamed file, and
53469+ if a file is being replaced by the rename, new_dentry has the inode
53470+ and name of the replaced file
53471+ */
53472+
53473+ if (unlikely(!(gr_status & GR_READY)))
53474+ return;
53475+
53476+ preempt_disable();
53477+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53478+
53479+ /* we wouldn't have to check d_inode if it weren't for
53480+ NFS silly-renaming
53481+ */
53482+
53483+ write_lock(&gr_inode_lock);
53484+ if (unlikely(replace && inode)) {
53485+ ino_t newinode = inode->i_ino;
53486+ dev_t newdev = __get_dev(new_dentry);
53487+ inodev = lookup_inodev_entry(newinode, newdev);
53488+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53489+ do_handle_delete(inodev, newinode, newdev);
53490+ }
53491+
53492+ inodev = lookup_inodev_entry(oldinode, olddev);
53493+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53494+ do_handle_delete(inodev, oldinode, olddev);
53495+
53496+ if (unlikely((unsigned long)matchn))
53497+ do_handle_create(matchn, old_dentry, mnt);
53498+
53499+ write_unlock(&gr_inode_lock);
53500+ preempt_enable();
53501+
53502+ return;
53503+}
53504+
53505+static int
53506+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53507+ unsigned char **sum)
53508+{
53509+ struct acl_role_label *r;
53510+ struct role_allowed_ip *ipp;
53511+ struct role_transition *trans;
53512+ unsigned int i;
53513+ int found = 0;
53514+ u32 curr_ip = current->signal->curr_ip;
53515+
53516+ current->signal->saved_ip = curr_ip;
53517+
53518+ /* check transition table */
53519+
53520+ for (trans = current->role->transitions; trans; trans = trans->next) {
53521+ if (!strcmp(rolename, trans->rolename)) {
53522+ found = 1;
53523+ break;
53524+ }
53525+ }
53526+
53527+ if (!found)
53528+ return 0;
53529+
53530+ /* handle special roles that do not require authentication
53531+ and check ip */
53532+
53533+ FOR_EACH_ROLE_START(r)
53534+ if (!strcmp(rolename, r->rolename) &&
53535+ (r->roletype & GR_ROLE_SPECIAL)) {
53536+ found = 0;
53537+ if (r->allowed_ips != NULL) {
53538+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53539+ if ((ntohl(curr_ip) & ipp->netmask) ==
53540+ (ntohl(ipp->addr) & ipp->netmask))
53541+ found = 1;
53542+ }
53543+ } else
53544+ found = 2;
53545+ if (!found)
53546+ return 0;
53547+
53548+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53549+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53550+ *salt = NULL;
53551+ *sum = NULL;
53552+ return 1;
53553+ }
53554+ }
53555+ FOR_EACH_ROLE_END(r)
53556+
53557+ for (i = 0; i < num_sprole_pws; i++) {
53558+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53559+ *salt = acl_special_roles[i]->salt;
53560+ *sum = acl_special_roles[i]->sum;
53561+ return 1;
53562+ }
53563+ }
53564+
53565+ return 0;
53566+}
53567+
53568+static void
53569+assign_special_role(char *rolename)
53570+{
53571+ struct acl_object_label *obj;
53572+ struct acl_role_label *r;
53573+ struct acl_role_label *assigned = NULL;
53574+ struct task_struct *tsk;
53575+ struct file *filp;
53576+
53577+ FOR_EACH_ROLE_START(r)
53578+ if (!strcmp(rolename, r->rolename) &&
53579+ (r->roletype & GR_ROLE_SPECIAL)) {
53580+ assigned = r;
53581+ break;
53582+ }
53583+ FOR_EACH_ROLE_END(r)
53584+
53585+ if (!assigned)
53586+ return;
53587+
53588+ read_lock(&tasklist_lock);
53589+ read_lock(&grsec_exec_file_lock);
53590+
53591+ tsk = current->real_parent;
53592+ if (tsk == NULL)
53593+ goto out_unlock;
53594+
53595+ filp = tsk->exec_file;
53596+ if (filp == NULL)
53597+ goto out_unlock;
53598+
53599+ tsk->is_writable = 0;
53600+
53601+ tsk->acl_sp_role = 1;
53602+ tsk->acl_role_id = ++acl_sp_role_value;
53603+ tsk->role = assigned;
53604+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53605+
53606+ /* ignore additional mmap checks for processes that are writable
53607+ by the default ACL */
53608+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53609+ if (unlikely(obj->mode & GR_WRITE))
53610+ tsk->is_writable = 1;
53611+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53612+ if (unlikely(obj->mode & GR_WRITE))
53613+ tsk->is_writable = 1;
53614+
53615+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53616+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53617+#endif
53618+
53619+out_unlock:
53620+ read_unlock(&grsec_exec_file_lock);
53621+ read_unlock(&tasklist_lock);
53622+ return;
53623+}
53624+
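+/* returns 1 if the task's controlling terminal is considered secure; returns 0
+   (after logging and optionally killing the offender) if a process that is not
+   an ancestor of the task holds an open file on the same terminal device */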
53625+int gr_check_secure_terminal(struct task_struct *task)
53626+{
53627+ struct task_struct *p, *p2, *p3;
53628+ struct files_struct *files;
53629+ struct fdtable *fdt;
53630+ struct file *our_file = NULL, *file;
53631+ int i;
53632+
53633+ if (task->signal->tty == NULL)
53634+ return 1;
53635+
53636+ files = get_files_struct(task);
53637+ if (files != NULL) {
53638+ rcu_read_lock();
53639+ fdt = files_fdtable(files);
53640+ for (i=0; i < fdt->max_fds; i++) {
53641+ file = fcheck_files(files, i);
53642+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53643+ get_file(file);
53644+ our_file = file;
53645+ }
53646+ }
53647+ rcu_read_unlock();
53648+ put_files_struct(files);
53649+ }
53650+
53651+ if (our_file == NULL)
53652+ return 1;
53653+
53654+ read_lock(&tasklist_lock);
53655+ do_each_thread(p2, p) {
53656+ files = get_files_struct(p);
53657+ if (files == NULL ||
53658+ (p->signal && p->signal->tty == task->signal->tty)) {
53659+ if (files != NULL)
53660+ put_files_struct(files);
53661+ continue;
53662+ }
53663+ rcu_read_lock();
53664+ fdt = files_fdtable(files);
53665+ for (i=0; i < fdt->max_fds; i++) {
53666+ file = fcheck_files(files, i);
53667+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53668+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53669+ p3 = task;
53670+ while (p3->pid > 0) {
53671+ if (p3 == p)
53672+ break;
53673+ p3 = p3->real_parent;
53674+ }
53675+ if (p3 == p)
53676+ break;
53677+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53678+ gr_handle_alertkill(p);
53679+ rcu_read_unlock();
53680+ put_files_struct(files);
53681+ read_unlock(&tasklist_lock);
53682+ fput(our_file);
53683+ return 0;
53684+ }
53685+ }
53686+ rcu_read_unlock();
53687+ put_files_struct(files);
53688+ } while_each_thread(p2, p);
53689+ read_unlock(&tasklist_lock);
53690+
53691+ fput(our_file);
53692+ return 1;
53693+}
53694+
53695+ssize_t
53696+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53697+{
53698+ struct gr_arg_wrapper uwrap;
53699+ unsigned char *sprole_salt = NULL;
53700+ unsigned char *sprole_sum = NULL;
53701+ int error = sizeof (struct gr_arg_wrapper);
53702+ int error2 = 0;
53703+
53704+ mutex_lock(&gr_dev_mutex);
53705+
53706+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53707+ error = -EPERM;
53708+ goto out;
53709+ }
53710+
53711+ if (count != sizeof (struct gr_arg_wrapper)) {
53712+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53713+ error = -EINVAL;
53714+ goto out;
53715+ }
53716+
53717+
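+ /* an expired authentication lockout is cleared before this attempt is judged */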
53718+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53719+ gr_auth_expires = 0;
53720+ gr_auth_attempts = 0;
53721+ }
53722+
53723+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53724+ error = -EFAULT;
53725+ goto out;
53726+ }
53727+
53728+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53729+ error = -EINVAL;
53730+ goto out;
53731+ }
53732+
53733+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53734+ error = -EFAULT;
53735+ goto out;
53736+ }
53737+
53738+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53739+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53740+ time_after(gr_auth_expires, get_seconds())) {
53741+ error = -EBUSY;
53742+ goto out;
53743+ }
53744+
53745+ /* if a non-root user is trying to do anything other than use a special role,
53746+ do not attempt authentication and do not count the attempt towards
53747+ authentication lockout
53748+ */
53749+
53750+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53751+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53752+ current_uid()) {
53753+ error = -EPERM;
53754+ goto out;
53755+ }
53756+
53757+ /* ensure pw and special role name are null terminated */
53758+
53759+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53760+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53761+
53762+ /* Okay.
53763+ * We have enough of the argument structure (we have yet
53764+ * to copy_from_user the tables themselves). Copy the tables
53765+ * only if we need them, i.e. for loading operations. */
53766+
53767+ switch (gr_usermode->mode) {
53768+ case GR_STATUS:
53769+ if (gr_status & GR_READY) {
53770+ error = 1;
53771+ if (!gr_check_secure_terminal(current))
53772+ error = 3;
53773+ } else
53774+ error = 2;
53775+ goto out;
53776+ case GR_SHUTDOWN:
53777+ if ((gr_status & GR_READY)
53778+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53779+ pax_open_kernel();
53780+ gr_status &= ~GR_READY;
53781+ pax_close_kernel();
53782+
53783+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53784+ free_variables();
53785+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53786+ memset(gr_system_salt, 0, GR_SALT_LEN);
53787+ memset(gr_system_sum, 0, GR_SHA_LEN);
53788+ } else if (gr_status & GR_READY) {
53789+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53790+ error = -EPERM;
53791+ } else {
53792+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53793+ error = -EAGAIN;
53794+ }
53795+ break;
53796+ case GR_ENABLE:
53797+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53798+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53799+ else {
53800+ if (gr_status & GR_READY)
53801+ error = -EAGAIN;
53802+ else
53803+ error = error2;
53804+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53805+ }
53806+ break;
53807+ case GR_RELOAD:
53808+ if (!(gr_status & GR_READY)) {
53809+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53810+ error = -EAGAIN;
53811+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53812+ lock_kernel();
53813+
53814+ pax_open_kernel();
53815+ gr_status &= ~GR_READY;
53816+ pax_close_kernel();
53817+
53818+ free_variables();
53819+ if (!(error2 = gracl_init(gr_usermode))) {
53820+ unlock_kernel();
53821+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53822+ } else {
53823+ unlock_kernel();
53824+ error = error2;
53825+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53826+ }
53827+ } else {
53828+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53829+ error = -EPERM;
53830+ }
53831+ break;
53832+ case GR_SEGVMOD:
53833+ if (unlikely(!(gr_status & GR_READY))) {
53834+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53835+ error = -EAGAIN;
53836+ break;
53837+ }
53838+
53839+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53840+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53841+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53842+ struct acl_subject_label *segvacl;
53843+ segvacl =
53844+ lookup_acl_subj_label(gr_usermode->segv_inode,
53845+ gr_usermode->segv_device,
53846+ current->role);
53847+ if (segvacl) {
53848+ segvacl->crashes = 0;
53849+ segvacl->expires = 0;
53850+ }
53851+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53852+ gr_remove_uid(gr_usermode->segv_uid);
53853+ }
53854+ } else {
53855+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53856+ error = -EPERM;
53857+ }
53858+ break;
53859+ case GR_SPROLE:
53860+ case GR_SPROLEPAM:
53861+ if (unlikely(!(gr_status & GR_READY))) {
53862+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53863+ error = -EAGAIN;
53864+ break;
53865+ }
53866+
53867+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53868+ current->role->expires = 0;
53869+ current->role->auth_attempts = 0;
53870+ }
53871+
53872+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53873+ time_after(current->role->expires, get_seconds())) {
53874+ error = -EBUSY;
53875+ goto out;
53876+ }
53877+
53878+ if (lookup_special_role_auth
53879+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53880+ && ((!sprole_salt && !sprole_sum)
53881+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53882+ char *p = "";
53883+ assign_special_role(gr_usermode->sp_role);
53884+ read_lock(&tasklist_lock);
53885+ if (current->real_parent)
53886+ p = current->real_parent->role->rolename;
53887+ read_unlock(&tasklist_lock);
53888+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53889+ p, acl_sp_role_value);
53890+ } else {
53891+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53892+ error = -EPERM;
53893+ if(!(current->role->auth_attempts++))
53894+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53895+
53896+ goto out;
53897+ }
53898+ break;
53899+ case GR_UNSPROLE:
53900+ if (unlikely(!(gr_status & GR_READY))) {
53901+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53902+ error = -EAGAIN;
53903+ break;
53904+ }
53905+
53906+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53907+ char *p = "";
53908+ int i = 0;
53909+
53910+ read_lock(&tasklist_lock);
53911+ if (current->real_parent) {
53912+ p = current->real_parent->role->rolename;
53913+ i = current->real_parent->acl_role_id;
53914+ }
53915+ read_unlock(&tasklist_lock);
53916+
53917+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53918+ gr_set_acls(1);
53919+ } else {
53920+ error = -EPERM;
53921+ goto out;
53922+ }
53923+ break;
53924+ default:
53925+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53926+ error = -EINVAL;
53927+ break;
53928+ }
53929+
53930+ if (error != -EPERM)
53931+ goto out;
53932+
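+ /* every -EPERM result counts as a failed authentication attempt; the first
+    failure opens a CONFIG_GRKERNSEC_ACL_TIMEOUT lockout window */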
53933+ if(!(gr_auth_attempts++))
53934+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53935+
53936+ out:
53937+ mutex_unlock(&gr_dev_mutex);
53938+ return error;
53939+}
53940+
53941+/* must be called with
53942+ rcu_read_lock();
53943+ read_lock(&tasklist_lock);
53944+ read_lock(&grsec_exec_file_lock);
53945+*/
53946+int gr_apply_subject_to_task(struct task_struct *task)
53947+{
53948+ struct acl_object_label *obj;
53949+ char *tmpname;
53950+ struct acl_subject_label *tmpsubj;
53951+ struct file *filp;
53952+ struct name_entry *nmatch;
53953+
53954+ filp = task->exec_file;
53955+ if (filp == NULL)
53956+ return 0;
53957+
53958+ /* the following applies the correct subject to
53959+ binaries that were already running when the RBAC
53960+ system was enabled and that have since been
53961+ replaced or deleted on disk
53962+ -----
53963+ when the RBAC system starts, the inode/dev pair
53964+ taken from exec_file will be one the RBAC system
53965+ is unaware of; it only knows the inode/dev of the
53966+ file currently present on disk, or the absence
53967+ of that file.
53968+ */
53969+ preempt_disable();
53970+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53971+
53972+ nmatch = lookup_name_entry(tmpname);
53973+ preempt_enable();
53974+ tmpsubj = NULL;
53975+ if (nmatch) {
53976+ if (nmatch->deleted)
53977+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53978+ else
53979+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53980+ if (tmpsubj != NULL)
53981+ task->acl = tmpsubj;
53982+ }
53983+ if (tmpsubj == NULL)
53984+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53985+ task->role);
53986+ if (task->acl) {
53987+ task->is_writable = 0;
53988+ /* ignore additional mmap checks for processes that are writable
53989+ by the default ACL */
53990+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53991+ if (unlikely(obj->mode & GR_WRITE))
53992+ task->is_writable = 1;
53993+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53994+ if (unlikely(obj->mode & GR_WRITE))
53995+ task->is_writable = 1;
53996+
53997+ gr_set_proc_res(task);
53998+
53999+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54000+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54001+#endif
54002+ } else {
54003+ return 1;
54004+ }
54005+
54006+ return 0;
54007+}
54008+
54009+int
54010+gr_set_acls(const int type)
54011+{
54012+ struct task_struct *task, *task2;
54013+ struct acl_role_label *role = current->role;
54014+ __u16 acl_role_id = current->acl_role_id;
54015+ const struct cred *cred;
54016+ int ret;
54017+
54018+ rcu_read_lock();
54019+ read_lock(&tasklist_lock);
54020+ read_lock(&grsec_exec_file_lock);
54021+ do_each_thread(task2, task) {
54022+ /* check to see if we're called from the exit handler,
54023+ if so, only replace ACLs that have inherited the admin
54024+ ACL */
54025+
54026+ if (type && (task->role != role ||
54027+ task->acl_role_id != acl_role_id))
54028+ continue;
54029+
54030+ task->acl_role_id = 0;
54031+ task->acl_sp_role = 0;
54032+
54033+ if (task->exec_file) {
54034+ cred = __task_cred(task);
54035+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54036+
54037+ ret = gr_apply_subject_to_task(task);
54038+ if (ret) {
54039+ read_unlock(&grsec_exec_file_lock);
54040+ read_unlock(&tasklist_lock);
54041+ rcu_read_unlock();
54042+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54043+ return ret;
54044+ }
54045+ } else {
54046+ // it's a kernel process
54047+ task->role = kernel_role;
54048+ task->acl = kernel_role->root_label;
54049+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54050+ task->acl->mode &= ~GR_PROCFIND;
54051+#endif
54052+ }
54053+ } while_each_thread(task2, task);
54054+ read_unlock(&grsec_exec_file_lock);
54055+ read_unlock(&tasklist_lock);
54056+ rcu_read_unlock();
54057+
54058+ return 0;
54059+}
54060+
54061+void
54062+gr_learn_resource(const struct task_struct *task,
54063+ const int res, const unsigned long wanted, const int gt)
54064+{
54065+ struct acl_subject_label *acl;
54066+ const struct cred *cred;
54067+
54068+ if (unlikely((gr_status & GR_READY) &&
54069+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54070+ goto skip_reslog;
54071+
54072+#ifdef CONFIG_GRKERNSEC_RESLOG
54073+ gr_log_resource(task, res, wanted, gt);
54074+#endif
54075+ skip_reslog:
54076+
54077+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54078+ return;
54079+
54080+ acl = task->acl;
54081+
54082+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54083+ !(acl->resmask & (1 << (unsigned short) res))))
54084+ return;
54085+
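+ /* when the request exceeds the learned soft limit, raise the soft limit
+    (and, if exceeded, the hard limit) to the requested value plus a
+    per-resource cushion and emit a learning log entry */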
54086+ if (wanted >= acl->res[res].rlim_cur) {
54087+ unsigned long res_add;
54088+
54089+ res_add = wanted;
54090+ switch (res) {
54091+ case RLIMIT_CPU:
54092+ res_add += GR_RLIM_CPU_BUMP;
54093+ break;
54094+ case RLIMIT_FSIZE:
54095+ res_add += GR_RLIM_FSIZE_BUMP;
54096+ break;
54097+ case RLIMIT_DATA:
54098+ res_add += GR_RLIM_DATA_BUMP;
54099+ break;
54100+ case RLIMIT_STACK:
54101+ res_add += GR_RLIM_STACK_BUMP;
54102+ break;
54103+ case RLIMIT_CORE:
54104+ res_add += GR_RLIM_CORE_BUMP;
54105+ break;
54106+ case RLIMIT_RSS:
54107+ res_add += GR_RLIM_RSS_BUMP;
54108+ break;
54109+ case RLIMIT_NPROC:
54110+ res_add += GR_RLIM_NPROC_BUMP;
54111+ break;
54112+ case RLIMIT_NOFILE:
54113+ res_add += GR_RLIM_NOFILE_BUMP;
54114+ break;
54115+ case RLIMIT_MEMLOCK:
54116+ res_add += GR_RLIM_MEMLOCK_BUMP;
54117+ break;
54118+ case RLIMIT_AS:
54119+ res_add += GR_RLIM_AS_BUMP;
54120+ break;
54121+ case RLIMIT_LOCKS:
54122+ res_add += GR_RLIM_LOCKS_BUMP;
54123+ break;
54124+ case RLIMIT_SIGPENDING:
54125+ res_add += GR_RLIM_SIGPENDING_BUMP;
54126+ break;
54127+ case RLIMIT_MSGQUEUE:
54128+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54129+ break;
54130+ case RLIMIT_NICE:
54131+ res_add += GR_RLIM_NICE_BUMP;
54132+ break;
54133+ case RLIMIT_RTPRIO:
54134+ res_add += GR_RLIM_RTPRIO_BUMP;
54135+ break;
54136+ case RLIMIT_RTTIME:
54137+ res_add += GR_RLIM_RTTIME_BUMP;
54138+ break;
54139+ }
54140+
54141+ acl->res[res].rlim_cur = res_add;
54142+
54143+ if (wanted > acl->res[res].rlim_max)
54144+ acl->res[res].rlim_max = res_add;
54145+
54146+ /* only log the subject filename, since resource logging is supported for
54147+ single-subject learning only */
54148+ rcu_read_lock();
54149+ cred = __task_cred(task);
54150+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54151+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54152+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54153+ "", (unsigned long) res, &task->signal->saved_ip);
54154+ rcu_read_unlock();
54155+ }
54156+
54157+ return;
54158+}
54159+
54160+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54161+void
54162+pax_set_initial_flags(struct linux_binprm *bprm)
54163+{
54164+ struct task_struct *task = current;
54165+ struct acl_subject_label *proc;
54166+ unsigned long flags;
54167+
54168+ if (unlikely(!(gr_status & GR_READY)))
54169+ return;
54170+
54171+ flags = pax_get_flags(task);
54172+
54173+ proc = task->acl;
54174+
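+ /* clear the explicitly disabled PaX flags first, then set the explicitly
+    enabled ones, so an enable wins if a subject specifies both for a flag */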
54175+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54176+ flags &= ~MF_PAX_PAGEEXEC;
54177+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54178+ flags &= ~MF_PAX_SEGMEXEC;
54179+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54180+ flags &= ~MF_PAX_RANDMMAP;
54181+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54182+ flags &= ~MF_PAX_EMUTRAMP;
54183+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54184+ flags &= ~MF_PAX_MPROTECT;
54185+
54186+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54187+ flags |= MF_PAX_PAGEEXEC;
54188+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54189+ flags |= MF_PAX_SEGMEXEC;
54190+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54191+ flags |= MF_PAX_RANDMMAP;
54192+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54193+ flags |= MF_PAX_EMUTRAMP;
54194+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54195+ flags |= MF_PAX_MPROTECT;
54196+
54197+ pax_set_flags(task, flags);
54198+
54199+ return;
54200+}
54201+#endif
54202+
54203+#ifdef CONFIG_SYSCTL
54204+/* Eric Biederman likes breaking userland ABI and every inode-based security
54205+ system to save 35kb of memory */
54206+
54207+/* we modify the passed in filename, but adjust it back before returning */
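+/* resolves the closest matching object label for a /proc/sys pathname: try the
+   full path first, then repeatedly truncate at the last '/' and retry, checking
+   the current subject and each of its parent subjects (including glob rules) at
+   every step */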
54208+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54209+{
54210+ struct name_entry *nmatch;
54211+ char *p, *lastp = NULL;
54212+ struct acl_object_label *obj = NULL, *tmp;
54213+ struct acl_subject_label *tmpsubj;
54214+ char c = '\0';
54215+
54216+ read_lock(&gr_inode_lock);
54217+
54218+ p = name + len - 1;
54219+ do {
54220+ nmatch = lookup_name_entry(name);
54221+ if (lastp != NULL)
54222+ *lastp = c;
54223+
54224+ if (nmatch == NULL)
54225+ goto next_component;
54226+ tmpsubj = current->acl;
54227+ do {
54228+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54229+ if (obj != NULL) {
54230+ tmp = obj->globbed;
54231+ while (tmp) {
54232+ if (!glob_match(tmp->filename, name)) {
54233+ obj = tmp;
54234+ goto found_obj;
54235+ }
54236+ tmp = tmp->next;
54237+ }
54238+ goto found_obj;
54239+ }
54240+ } while ((tmpsubj = tmpsubj->parent_subject));
54241+next_component:
54242+ /* end case */
54243+ if (p == name)
54244+ break;
54245+
54246+ while (*p != '/')
54247+ p--;
54248+ if (p == name)
54249+ lastp = p + 1;
54250+ else {
54251+ lastp = p;
54252+ p--;
54253+ }
54254+ c = *lastp;
54255+ *lastp = '\0';
54256+ } while (1);
54257+found_obj:
54258+ read_unlock(&gr_inode_lock);
54259+ /* obj returned will always be non-null */
54260+ return obj;
54261+}
54262+
54263+/* returns 0 when allowing, non-zero on error;
54264+   an op of 0 is used for readdir, so we don't log the names of hidden files
54265+*/
54266+__u32
54267+gr_handle_sysctl(const struct ctl_table *table, const int op)
54268+{
54269+ ctl_table *tmp;
54270+ const char *proc_sys = "/proc/sys";
54271+ char *path;
54272+ struct acl_object_label *obj;
54273+ unsigned short len = 0, pos = 0, depth = 0, i;
54274+ __u32 err = 0;
54275+ __u32 mode = 0;
54276+
54277+ if (unlikely(!(gr_status & GR_READY)))
54278+ return 0;
54279+
54280+ /* for now, ignore operations on non-leaf (directory) sysctl entries
54281+ if it's not a readdir */
54282+ if (table->child != NULL && op != 0)
54283+ return 0;
54284+
54285+ mode |= GR_FIND;
54286+ /* it's only a read if it's an entry, read on dirs is for readdir */
54287+ if (op & MAY_READ)
54288+ mode |= GR_READ;
54289+ if (op & MAY_WRITE)
54290+ mode |= GR_WRITE;
54291+
54292+ preempt_disable();
54293+
54294+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54295+
54296+ /* it's only a read/write if it's an actual entry, not a dir
54297+ (which are opened for readdir)
54298+ */
54299+
54300+ /* convert the requested sysctl entry into a pathname */
54301+
54302+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54303+ len += strlen(tmp->procname);
54304+ len++;
54305+ depth++;
54306+ }
54307+
54308+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54309+ /* deny */
54310+ goto out;
54311+ }
54312+
54313+ memset(path, 0, PAGE_SIZE);
54314+
54315+ memcpy(path, proc_sys, strlen(proc_sys));
54316+
54317+ pos += strlen(proc_sys);
54318+
54319+ for (; depth > 0; depth--) {
54320+ path[pos] = '/';
54321+ pos++;
54322+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54323+ if (depth == i) {
54324+ memcpy(path + pos, tmp->procname,
54325+ strlen(tmp->procname));
54326+ pos += strlen(tmp->procname);
54327+ }
54328+ i++;
54329+ }
54330+ }
54331+
54332+ obj = gr_lookup_by_name(path, pos);
54333+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54334+
54335+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54336+ ((err & mode) != mode))) {
54337+ __u32 new_mode = mode;
54338+
54339+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54340+
54341+ err = 0;
54342+ gr_log_learn_sysctl(path, new_mode);
54343+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54344+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54345+ err = -ENOENT;
54346+ } else if (!(err & GR_FIND)) {
54347+ err = -ENOENT;
54348+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54349+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54350+ path, (mode & GR_READ) ? " reading" : "",
54351+ (mode & GR_WRITE) ? " writing" : "");
54352+ err = -EACCES;
54353+ } else if ((err & mode) != mode) {
54354+ err = -EACCES;
54355+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54356+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54357+ path, (mode & GR_READ) ? " reading" : "",
54358+ (mode & GR_WRITE) ? " writing" : "");
54359+ err = 0;
54360+ } else
54361+ err = 0;
54362+
54363+ out:
54364+ preempt_enable();
54365+
54366+ return err;
54367+}
54368+#endif
54369+
54370+int
54371+gr_handle_proc_ptrace(struct task_struct *task)
54372+{
54373+ struct file *filp;
54374+ struct task_struct *tmp = task;
54375+ struct task_struct *curtemp = current;
54376+ __u32 retmode;
54377+
54378+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54379+ if (unlikely(!(gr_status & GR_READY)))
54380+ return 0;
54381+#endif
54382+
54383+ read_lock(&tasklist_lock);
54384+ read_lock(&grsec_exec_file_lock);
54385+ filp = task->exec_file;
54386+
54387+ while (tmp->pid > 0) {
54388+ if (tmp == curtemp)
54389+ break;
54390+ tmp = tmp->real_parent;
54391+ }
54392+
54393+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54394+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54395+ read_unlock(&grsec_exec_file_lock);
54396+ read_unlock(&tasklist_lock);
54397+ return 1;
54398+ }
54399+
54400+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54401+ if (!(gr_status & GR_READY)) {
54402+ read_unlock(&grsec_exec_file_lock);
54403+ read_unlock(&tasklist_lock);
54404+ return 0;
54405+ }
54406+#endif
54407+
54408+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54409+ read_unlock(&grsec_exec_file_lock);
54410+ read_unlock(&tasklist_lock);
54411+
54412+ if (retmode & GR_NOPTRACE)
54413+ return 1;
54414+
54415+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54416+ && (current->acl != task->acl || (current->acl != current->role->root_label
54417+ && current->pid != task->pid)))
54418+ return 1;
54419+
54420+ return 0;
54421+}
54422+
54423+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54424+{
54425+ if (unlikely(!(gr_status & GR_READY)))
54426+ return;
54427+
54428+ if (!(current->role->roletype & GR_ROLE_GOD))
54429+ return;
54430+
54431+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54432+ p->role->rolename, gr_task_roletype_to_char(p),
54433+ p->acl->filename);
54434+}
54435+
54436+int
54437+gr_handle_ptrace(struct task_struct *task, const long request)
54438+{
54439+ struct task_struct *tmp = task;
54440+ struct task_struct *curtemp = current;
54441+ __u32 retmode;
54442+
54443+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54444+ if (unlikely(!(gr_status & GR_READY)))
54445+ return 0;
54446+#endif
54447+
54448+ read_lock(&tasklist_lock);
54449+ while (tmp->pid > 0) {
54450+ if (tmp == curtemp)
54451+ break;
54452+ tmp = tmp->real_parent;
54453+ }
54454+
54455+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54456+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54457+ read_unlock(&tasklist_lock);
54458+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54459+ return 1;
54460+ }
54461+ read_unlock(&tasklist_lock);
54462+
54463+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54464+ if (!(gr_status & GR_READY))
54465+ return 0;
54466+#endif
54467+
54468+ read_lock(&grsec_exec_file_lock);
54469+ if (unlikely(!task->exec_file)) {
54470+ read_unlock(&grsec_exec_file_lock);
54471+ return 0;
54472+ }
54473+
54474+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54475+ read_unlock(&grsec_exec_file_lock);
54476+
54477+ if (retmode & GR_NOPTRACE) {
54478+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54479+ return 1;
54480+ }
54481+
54482+ if (retmode & GR_PTRACERD) {
54483+ switch (request) {
54484+ case PTRACE_POKETEXT:
54485+ case PTRACE_POKEDATA:
54486+ case PTRACE_POKEUSR:
54487+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54488+ case PTRACE_SETREGS:
54489+ case PTRACE_SETFPREGS:
54490+#endif
54491+#ifdef CONFIG_X86
54492+ case PTRACE_SETFPXREGS:
54493+#endif
54494+#ifdef CONFIG_ALTIVEC
54495+ case PTRACE_SETVRREGS:
54496+#endif
54497+ return 1;
54498+ default:
54499+ return 0;
54500+ }
54501+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
54502+ !(current->role->roletype & GR_ROLE_GOD) &&
54503+ (current->acl != task->acl)) {
54504+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54505+ return 1;
54506+ }
54507+
54508+ return 0;
54509+}
54510+
54511+static int is_writable_mmap(const struct file *filp)
54512+{
54513+ struct task_struct *task = current;
54514+ struct acl_object_label *obj, *obj2;
54515+
54516+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54517+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54518+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54519+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54520+ task->role->root_label);
54521+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54522+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54523+ return 1;
54524+ }
54525+ }
54526+ return 0;
54527+}
54528+
54529+int
54530+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54531+{
54532+ __u32 mode;
54533+
54534+ if (unlikely(!file || !(prot & PROT_EXEC)))
54535+ return 1;
54536+
54537+ if (is_writable_mmap(file))
54538+ return 0;
54539+
54540+ mode =
54541+ gr_search_file(file->f_path.dentry,
54542+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54543+ file->f_path.mnt);
54544+
54545+ if (!gr_tpe_allow(file))
54546+ return 0;
54547+
54548+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54549+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54550+ return 0;
54551+ } else if (unlikely(!(mode & GR_EXEC))) {
54552+ return 0;
54553+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54554+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54555+ return 1;
54556+ }
54557+
54558+ return 1;
54559+}
54560+
54561+int
54562+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54563+{
54564+ __u32 mode;
54565+
54566+ if (unlikely(!file || !(prot & PROT_EXEC)))
54567+ return 1;
54568+
54569+ if (is_writable_mmap(file))
54570+ return 0;
54571+
54572+ mode =
54573+ gr_search_file(file->f_path.dentry,
54574+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54575+ file->f_path.mnt);
54576+
54577+ if (!gr_tpe_allow(file))
54578+ return 0;
54579+
54580+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54581+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54582+ return 0;
54583+ } else if (unlikely(!(mode & GR_EXEC))) {
54584+ return 0;
54585+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54586+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54587+ return 1;
54588+ }
54589+
54590+ return 1;
54591+}
54592+
54593+void
54594+gr_acl_handle_psacct(struct task_struct *task, const long code)
54595+{
54596+ unsigned long runtime;
54597+ unsigned long cputime;
54598+ unsigned int wday, cday;
54599+ __u8 whr, chr;
54600+ __u8 wmin, cmin;
54601+ __u8 wsec, csec;
54602+ struct timespec timeval;
54603+
54604+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54605+ !(task->acl->mode & GR_PROCACCT)))
54606+ return;
54607+
54608+ do_posix_clock_monotonic_gettime(&timeval);
54609+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54610+ wday = runtime / (3600 * 24);
54611+ runtime -= wday * (3600 * 24);
54612+ whr = runtime / 3600;
54613+ runtime -= whr * 3600;
54614+ wmin = runtime / 60;
54615+ runtime -= wmin * 60;
54616+ wsec = runtime;
54617+
54618+ cputime = (task->utime + task->stime) / HZ;
54619+ cday = cputime / (3600 * 24);
54620+ cputime -= cday * (3600 * 24);
54621+ chr = cputime / 3600;
54622+ cputime -= chr * 3600;
54623+ cmin = cputime / 60;
54624+ cputime -= cmin * 60;
54625+ csec = cputime;
54626+
54627+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54628+
54629+ return;
54630+}
54631+
54632+void gr_set_kernel_label(struct task_struct *task)
54633+{
54634+ if (gr_status & GR_READY) {
54635+ task->role = kernel_role;
54636+ task->acl = kernel_role->root_label;
54637+ }
54638+ return;
54639+}
54640+
54641+#ifdef CONFIG_TASKSTATS
54642+int gr_is_taskstats_denied(int pid)
54643+{
54644+ struct task_struct *task;
54645+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54646+ const struct cred *cred;
54647+#endif
54648+ int ret = 0;
54649+
54650+ /* restrict taskstats viewing to un-chrooted root users
54651+ who have the 'view' subject flag if the RBAC system is enabled
54652+ */
54653+
54654+ rcu_read_lock();
54655+ read_lock(&tasklist_lock);
54656+ task = find_task_by_vpid(pid);
54657+ if (task) {
54658+#ifdef CONFIG_GRKERNSEC_CHROOT
54659+ if (proc_is_chrooted(task))
54660+ ret = -EACCES;
54661+#endif
54662+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54663+ cred = __task_cred(task);
54664+#ifdef CONFIG_GRKERNSEC_PROC_USER
54665+ if (cred->uid != 0)
54666+ ret = -EACCES;
54667+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54668+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54669+ ret = -EACCES;
54670+#endif
54671+#endif
54672+ if (gr_status & GR_READY) {
54673+ if (!(task->acl->mode & GR_VIEW))
54674+ ret = -EACCES;
54675+ }
54676+ } else
54677+ ret = -ENOENT;
54678+
54679+ read_unlock(&tasklist_lock);
54680+ rcu_read_unlock();
54681+
54682+ return ret;
54683+}
54684+#endif
54685+
54686+/* AUXV entries are filled via a descendant of search_binary_handler
54687+ after we've already applied the subject for the target
54688+*/
54689+int gr_acl_enable_at_secure(void)
54690+{
54691+ if (unlikely(!(gr_status & GR_READY)))
54692+ return 0;
54693+
54694+ if (current->acl->mode & GR_ATSECURE)
54695+ return 1;
54696+
54697+ return 0;
54698+}
54699+
54700+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54701+{
54702+ struct task_struct *task = current;
54703+ struct dentry *dentry = file->f_path.dentry;
54704+ struct vfsmount *mnt = file->f_path.mnt;
54705+ struct acl_object_label *obj, *tmp;
54706+ struct acl_subject_label *subj;
54707+ unsigned int bufsize;
54708+ int is_not_root;
54709+ char *path;
54710+ dev_t dev = __get_dev(dentry);
54711+
54712+ if (unlikely(!(gr_status & GR_READY)))
54713+ return 1;
54714+
54715+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54716+ return 1;
54717+
54718+ /* ignore Eric Biederman */
54719+ if (IS_PRIVATE(dentry->d_inode))
54720+ return 1;
54721+
54722+ subj = task->acl;
54723+ do {
54724+ obj = lookup_acl_obj_label(ino, dev, subj);
54725+ if (obj != NULL)
54726+ return (obj->mode & GR_FIND) ? 1 : 0;
54727+ } while ((subj = subj->parent_subject));
54728+
54729+ /* this is purely an optimization since we're looking for an object
54730+ for the directory we're doing a readdir on
54731+ if it's possible for any globbed object to match the entry we're
54732+ filling into the directory, then the object we find here will be
54733+ an anchor point with attached globbed objects
54734+ */
54735+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54736+ if (obj->globbed == NULL)
54737+ return (obj->mode & GR_FIND) ? 1 : 0;
54738+
54739+ is_not_root = ((obj->filename[0] == '/') &&
54740+ (obj->filename[1] == '\0')) ? 0 : 1;
54741+ bufsize = PAGE_SIZE - namelen - is_not_root;
54742+
54743+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54744+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54745+ return 1;
54746+
54747+ preempt_disable();
54748+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54749+ bufsize);
54750+
54751+ bufsize = strlen(path);
54752+
54753+ /* if base is "/", don't append an additional slash */
54754+ if (is_not_root)
54755+ *(path + bufsize) = '/';
54756+ memcpy(path + bufsize + is_not_root, name, namelen);
54757+ *(path + bufsize + namelen + is_not_root) = '\0';
54758+
54759+ tmp = obj->globbed;
54760+ while (tmp) {
54761+ if (!glob_match(tmp->filename, path)) {
54762+ preempt_enable();
54763+ return (tmp->mode & GR_FIND) ? 1 : 0;
54764+ }
54765+ tmp = tmp->next;
54766+ }
54767+ preempt_enable();
54768+ return (obj->mode & GR_FIND) ? 1 : 0;
54769+}
54770+
54771+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54772+EXPORT_SYMBOL(gr_acl_is_enabled);
54773+#endif
54774+EXPORT_SYMBOL(gr_learn_resource);
54775+EXPORT_SYMBOL(gr_set_kernel_label);
54776+#ifdef CONFIG_SECURITY
54777+EXPORT_SYMBOL(gr_check_user_change);
54778+EXPORT_SYMBOL(gr_check_group_change);
54779+#endif
54780+
54781diff -urNp linux-2.6.32.48/grsecurity/gracl_cap.c linux-2.6.32.48/grsecurity/gracl_cap.c
54782--- linux-2.6.32.48/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
54783+++ linux-2.6.32.48/grsecurity/gracl_cap.c 2011-11-15 19:59:43.000000000 -0500
54784@@ -0,0 +1,101 @@
54785+#include <linux/kernel.h>
54786+#include <linux/module.h>
54787+#include <linux/sched.h>
54788+#include <linux/gracl.h>
54789+#include <linux/grsecurity.h>
54790+#include <linux/grinternal.h>
54791+
54792+extern const char *captab_log[];
54793+extern int captab_log_entries;
54794+
54795+int
54796+gr_acl_is_capable(const int cap)
54797+{
54798+ struct task_struct *task = current;
54799+ const struct cred *cred = current_cred();
54800+ struct acl_subject_label *curracl;
54801+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54802+ kernel_cap_t cap_audit = __cap_empty_set;
54803+
54804+ if (!gr_acl_is_enabled())
54805+ return 1;
54806+
54807+ curracl = task->acl;
54808+
54809+ cap_drop = curracl->cap_lower;
54810+ cap_mask = curracl->cap_mask;
54811+ cap_audit = curracl->cap_invert_audit;
54812+
54813+ while ((curracl = curracl->parent_subject)) {
54814+ /* if the cap isn't specified in the current computed mask but is specified in the
54815+ current level subject, and is lowered in the current level subject, then add
54816+ it to the set of dropped capabilities
54817+ otherwise, add the current level subject's mask to the current computed mask
54818+ */
54819+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54820+ cap_raise(cap_mask, cap);
54821+ if (cap_raised(curracl->cap_lower, cap))
54822+ cap_raise(cap_drop, cap);
54823+ if (cap_raised(curracl->cap_invert_audit, cap))
54824+ cap_raise(cap_audit, cap);
54825+ }
54826+ }
54827+
54828+ if (!cap_raised(cap_drop, cap)) {
54829+ if (cap_raised(cap_audit, cap))
54830+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54831+ return 1;
54832+ }
54833+
54834+ curracl = task->acl;
54835+
54836+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54837+ && cap_raised(cred->cap_effective, cap)) {
54838+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54839+ task->role->roletype, cred->uid,
54840+ cred->gid, task->exec_file ?
54841+ gr_to_filename(task->exec_file->f_path.dentry,
54842+ task->exec_file->f_path.mnt) : curracl->filename,
54843+ curracl->filename, 0UL,
54844+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54845+ return 1;
54846+ }
54847+
54848+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54849+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54850+ return 0;
54851+}
54852+
54853+int
54854+gr_acl_is_capable_nolog(const int cap)
54855+{
54856+ struct acl_subject_label *curracl;
54857+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54858+
54859+ if (!gr_acl_is_enabled())
54860+ return 1;
54861+
54862+ curracl = current->acl;
54863+
54864+ cap_drop = curracl->cap_lower;
54865+ cap_mask = curracl->cap_mask;
54866+
54867+ while ((curracl = curracl->parent_subject)) {
54868+ /* if the cap isn't specified in the current computed mask but is specified in the
54869+ current level subject, and is lowered in the current level subject, then add
54870+ it to the set of dropped capabilities
54871+ otherwise, add the current level subject's mask to the current computed mask
54872+ */
54873+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54874+ cap_raise(cap_mask, cap);
54875+ if (cap_raised(curracl->cap_lower, cap))
54876+ cap_raise(cap_drop, cap);
54877+ }
54878+ }
54879+
54880+ if (!cap_raised(cap_drop, cap))
54881+ return 1;
54882+
54883+ return 0;
54884+}
54885+
54886diff -urNp linux-2.6.32.48/grsecurity/gracl_fs.c linux-2.6.32.48/grsecurity/gracl_fs.c
54887--- linux-2.6.32.48/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
54888+++ linux-2.6.32.48/grsecurity/gracl_fs.c 2011-11-15 19:59:43.000000000 -0500
54889@@ -0,0 +1,431 @@
54890+#include <linux/kernel.h>
54891+#include <linux/sched.h>
54892+#include <linux/types.h>
54893+#include <linux/fs.h>
54894+#include <linux/file.h>
54895+#include <linux/stat.h>
54896+#include <linux/grsecurity.h>
54897+#include <linux/grinternal.h>
54898+#include <linux/gracl.h>
54899+
54900+__u32
54901+gr_acl_handle_hidden_file(const struct dentry * dentry,
54902+ const struct vfsmount * mnt)
54903+{
54904+ __u32 mode;
54905+
54906+ if (unlikely(!dentry->d_inode))
54907+ return GR_FIND;
54908+
54909+ mode =
54910+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54911+
54912+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54913+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54914+ return mode;
54915+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54916+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54917+ return 0;
54918+ } else if (unlikely(!(mode & GR_FIND)))
54919+ return 0;
54920+
54921+ return GR_FIND;
54922+}
54923+
54924+__u32
54925+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54926+ const int fmode)
54927+{
54928+ __u32 reqmode = GR_FIND;
54929+ __u32 mode;
54930+
54931+ if (unlikely(!dentry->d_inode))
54932+ return reqmode;
54933+
54934+ if (unlikely(fmode & O_APPEND))
54935+ reqmode |= GR_APPEND;
54936+ else if (unlikely(fmode & FMODE_WRITE))
54937+ reqmode |= GR_WRITE;
54938+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
54939+ reqmode |= GR_READ;
54940+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
54941+ reqmode &= ~GR_READ;
54942+ mode =
54943+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54944+ mnt);
54945+
54946+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54947+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54948+ reqmode & GR_READ ? " reading" : "",
54949+ reqmode & GR_WRITE ? " writing" : reqmode &
54950+ GR_APPEND ? " appending" : "");
54951+ return reqmode;
54952+ } else
54953+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54954+ {
54955+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54956+ reqmode & GR_READ ? " reading" : "",
54957+ reqmode & GR_WRITE ? " writing" : reqmode &
54958+ GR_APPEND ? " appending" : "");
54959+ return 0;
54960+ } else if (unlikely((mode & reqmode) != reqmode))
54961+ return 0;
54962+
54963+ return reqmode;
54964+}
54965+
54966+__u32
54967+gr_acl_handle_creat(const struct dentry * dentry,
54968+ const struct dentry * p_dentry,
54969+ const struct vfsmount * p_mnt, const int fmode,
54970+ const int imode)
54971+{
54972+ __u32 reqmode = GR_WRITE | GR_CREATE;
54973+ __u32 mode;
54974+
54975+ if (unlikely(fmode & O_APPEND))
54976+ reqmode |= GR_APPEND;
54977+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
54978+ reqmode |= GR_READ;
54979+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
54980+ reqmode |= GR_SETID;
54981+
54982+ mode =
54983+ gr_check_create(dentry, p_dentry, p_mnt,
54984+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54985+
54986+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54987+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54988+ reqmode & GR_READ ? " reading" : "",
54989+ reqmode & GR_WRITE ? " writing" : reqmode &
54990+ GR_APPEND ? " appending" : "");
54991+ return reqmode;
54992+ } else
54993+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54994+ {
54995+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54996+ reqmode & GR_READ ? " reading" : "",
54997+ reqmode & GR_WRITE ? " writing" : reqmode &
54998+ GR_APPEND ? " appending" : "");
54999+ return 0;
55000+ } else if (unlikely((mode & reqmode) != reqmode))
55001+ return 0;
55002+
55003+ return reqmode;
55004+}
55005+
55006+__u32
55007+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55008+ const int fmode)
55009+{
55010+ __u32 mode, reqmode = GR_FIND;
55011+
55012+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55013+ reqmode |= GR_EXEC;
55014+ if (fmode & S_IWOTH)
55015+ reqmode |= GR_WRITE;
55016+ if (fmode & S_IROTH)
55017+ reqmode |= GR_READ;
55018+
55019+ mode =
55020+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55021+ mnt);
55022+
55023+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55024+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55025+ reqmode & GR_READ ? " reading" : "",
55026+ reqmode & GR_WRITE ? " writing" : "",
55027+ reqmode & GR_EXEC ? " executing" : "");
55028+ return reqmode;
55029+ } else
55030+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55031+ {
55032+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55033+ reqmode & GR_READ ? " reading" : "",
55034+ reqmode & GR_WRITE ? " writing" : "",
55035+ reqmode & GR_EXEC ? " executing" : "");
55036+ return 0;
55037+ } else if (unlikely((mode & reqmode) != reqmode))
55038+ return 0;
55039+
55040+ return reqmode;
55041+}
55042+
55043+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55044+{
55045+ __u32 mode;
55046+
55047+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55048+
55049+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55050+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55051+ return mode;
55052+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55053+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55054+ return 0;
55055+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55056+ return 0;
55057+
55058+ return (reqmode);
55059+}
55060+
55061+__u32
55062+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55063+{
55064+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55065+}
55066+
55067+__u32
55068+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55069+{
55070+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55071+}
55072+
55073+__u32
55074+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55075+{
55076+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55077+}
55078+
55079+__u32
55080+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55081+{
55082+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55083+}
55084+
55085+__u32
55086+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
55087+ mode_t mode)
55088+{
55089+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55090+ return 1;
55091+
55092+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55093+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55094+ GR_FCHMOD_ACL_MSG);
55095+ } else {
55096+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
55097+ }
55098+}
55099+
55100+__u32
55101+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55102+ mode_t mode)
55103+{
55104+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55105+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55106+ GR_CHMOD_ACL_MSG);
55107+ } else {
55108+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55109+ }
55110+}
55111+
55112+__u32
55113+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55114+{
55115+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55116+}
55117+
55118+__u32
55119+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55120+{
55121+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55122+}
55123+
55124+__u32
55125+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55126+{
55127+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55128+}
55129+
55130+__u32
55131+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55132+{
55133+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55134+ GR_UNIXCONNECT_ACL_MSG);
55135+}
55136+
55137+/* hardlinks require at minimum create and link permission,
55138+ any additional privilege required is based on the
55139+ privilege of the file being linked to
55140+*/
55141+__u32
55142+gr_acl_handle_link(const struct dentry * new_dentry,
55143+ const struct dentry * parent_dentry,
55144+ const struct vfsmount * parent_mnt,
55145+ const struct dentry * old_dentry,
55146+ const struct vfsmount * old_mnt, const char *to)
55147+{
55148+ __u32 mode;
55149+ __u32 needmode = GR_CREATE | GR_LINK;
55150+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55151+
55152+ mode =
55153+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55154+ old_mnt);
55155+
55156+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55157+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55158+ return mode;
55159+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55160+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55161+ return 0;
55162+ } else if (unlikely((mode & needmode) != needmode))
55163+ return 0;
55164+
55165+ return 1;
55166+}
55167+
55168+__u32
55169+gr_acl_handle_symlink(const struct dentry * new_dentry,
55170+ const struct dentry * parent_dentry,
55171+ const struct vfsmount * parent_mnt, const char *from)
55172+{
55173+ __u32 needmode = GR_WRITE | GR_CREATE;
55174+ __u32 mode;
55175+
55176+ mode =
55177+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55178+ GR_CREATE | GR_AUDIT_CREATE |
55179+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55180+
55181+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55182+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55183+ return mode;
55184+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55185+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55186+ return 0;
55187+ } else if (unlikely((mode & needmode) != needmode))
55188+ return 0;
55189+
55190+ return (GR_WRITE | GR_CREATE);
55191+}
55192+
55193+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55194+{
55195+ __u32 mode;
55196+
55197+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55198+
55199+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55200+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55201+ return mode;
55202+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55203+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55204+ return 0;
55205+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55206+ return 0;
55207+
55208+ return (reqmode);
55209+}
55210+
55211+__u32
55212+gr_acl_handle_mknod(const struct dentry * new_dentry,
55213+ const struct dentry * parent_dentry,
55214+ const struct vfsmount * parent_mnt,
55215+ const int mode)
55216+{
55217+ __u32 reqmode = GR_WRITE | GR_CREATE;
55218+ if (unlikely(mode & (S_ISUID | S_ISGID)))
55219+ reqmode |= GR_SETID;
55220+
55221+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55222+ reqmode, GR_MKNOD_ACL_MSG);
55223+}
55224+
55225+__u32
55226+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55227+ const struct dentry *parent_dentry,
55228+ const struct vfsmount *parent_mnt)
55229+{
55230+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55231+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55232+}
55233+
55234+#define RENAME_CHECK_SUCCESS(old, new) \
55235+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55236+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55237+
55238+int
55239+gr_acl_handle_rename(struct dentry *new_dentry,
55240+ struct dentry *parent_dentry,
55241+ const struct vfsmount *parent_mnt,
55242+ struct dentry *old_dentry,
55243+ struct inode *old_parent_inode,
55244+ struct vfsmount *old_mnt, const char *newname)
55245+{
55246+ __u32 comp1, comp2;
55247+ int error = 0;
55248+
55249+ if (unlikely(!gr_acl_is_enabled()))
55250+ return 0;
55251+
55252+ if (!new_dentry->d_inode) {
55253+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55254+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55255+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55256+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55257+ GR_DELETE | GR_AUDIT_DELETE |
55258+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55259+ GR_SUPPRESS, old_mnt);
55260+ } else {
55261+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55262+ GR_CREATE | GR_DELETE |
55263+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55264+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55265+ GR_SUPPRESS, parent_mnt);
55266+ comp2 =
55267+ gr_search_file(old_dentry,
55268+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55269+ GR_DELETE | GR_AUDIT_DELETE |
55270+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55271+ }
55272+
55273+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55274+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55275+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55276+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55277+ && !(comp2 & GR_SUPPRESS)) {
55278+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55279+ error = -EACCES;
55280+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55281+ error = -EACCES;
55282+
55283+ return error;
55284+}
55285+
55286+void
55287+gr_acl_handle_exit(void)
55288+{
55289+ u16 id;
55290+ char *rolename;
55291+ struct file *exec_file;
55292+
55293+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55294+ !(current->role->roletype & GR_ROLE_PERSIST))) {
55295+ id = current->acl_role_id;
55296+ rolename = current->role->rolename;
55297+ gr_set_acls(1);
55298+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55299+ }
55300+
55301+ write_lock(&grsec_exec_file_lock);
55302+ exec_file = current->exec_file;
55303+ current->exec_file = NULL;
55304+ write_unlock(&grsec_exec_file_lock);
55305+
55306+ if (exec_file)
55307+ fput(exec_file);
55308+}
55309+
55310+int
55311+gr_acl_handle_procpidmem(const struct task_struct *task)
55312+{
55313+ if (unlikely(!gr_acl_is_enabled()))
55314+ return 0;
55315+
55316+ if (task != current && task->acl->mode & GR_PROTPROCFD)
55317+ return -EACCES;
55318+
55319+ return 0;
55320+}
55321diff -urNp linux-2.6.32.48/grsecurity/gracl_ip.c linux-2.6.32.48/grsecurity/gracl_ip.c
55322--- linux-2.6.32.48/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
55323+++ linux-2.6.32.48/grsecurity/gracl_ip.c 2011-11-15 19:59:43.000000000 -0500
55324@@ -0,0 +1,382 @@
55325+#include <linux/kernel.h>
55326+#include <asm/uaccess.h>
55327+#include <asm/errno.h>
55328+#include <net/sock.h>
55329+#include <linux/file.h>
55330+#include <linux/fs.h>
55331+#include <linux/net.h>
55332+#include <linux/in.h>
55333+#include <linux/skbuff.h>
55334+#include <linux/ip.h>
55335+#include <linux/udp.h>
55336+#include <linux/smp_lock.h>
55337+#include <linux/types.h>
55338+#include <linux/sched.h>
55339+#include <linux/netdevice.h>
55340+#include <linux/inetdevice.h>
55341+#include <linux/gracl.h>
55342+#include <linux/grsecurity.h>
55343+#include <linux/grinternal.h>
55344+
55345+#define GR_BIND 0x01
55346+#define GR_CONNECT 0x02
55347+#define GR_INVERT 0x04
55348+#define GR_BINDOVERRIDE 0x08
55349+#define GR_CONNECTOVERRIDE 0x10
55350+#define GR_SOCK_FAMILY 0x20
55351+
55352+static const char * gr_protocols[IPPROTO_MAX] = {
55353+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55354+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55355+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55356+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55357+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55358+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55359+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55360+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55361+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55362+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55363+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55364+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55365+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55366+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55367+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55368+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55369+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55370+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55371+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55372+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55373+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55374+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55375+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55376+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55377+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55378+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55379+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55380+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55381+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55382+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55383+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55384+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55385+ };
55386+
55387+static const char * gr_socktypes[SOCK_MAX] = {
55388+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55389+ "unknown:7", "unknown:8", "unknown:9", "packet"
55390+ };
55391+
55392+static const char * gr_sockfamilies[AF_MAX+1] = {
55393+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55394+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55395+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55396+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
55397+ };
55398+
55399+const char *
55400+gr_proto_to_name(unsigned char proto)
55401+{
55402+ return gr_protocols[proto];
55403+}
55404+
55405+const char *
55406+gr_socktype_to_name(unsigned char type)
55407+{
55408+ return gr_socktypes[type];
55409+}
55410+
55411+const char *
55412+gr_sockfamily_to_name(unsigned char family)
55413+{
55414+ return gr_sockfamilies[family];
55415+}
55416+
55417+int
55418+gr_search_socket(const int domain, const int type, const int protocol)
55419+{
55420+ struct acl_subject_label *curr;
55421+ const struct cred *cred = current_cred();
55422+
55423+ if (unlikely(!gr_acl_is_enabled()))
55424+ goto exit;
55425+
55426+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
55427+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55428+ goto exit; // let the kernel handle it
55429+
55430+ curr = current->acl;
55431+
55432+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55433+ /* the family is allowed, if this is PF_INET allow it only if
55434+ the extra sock type/protocol checks pass */
55435+ if (domain == PF_INET)
55436+ goto inet_check;
55437+ goto exit;
55438+ } else {
55439+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55440+ __u32 fakeip = 0;
55441+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55442+ current->role->roletype, cred->uid,
55443+ cred->gid, current->exec_file ?
55444+ gr_to_filename(current->exec_file->f_path.dentry,
55445+ current->exec_file->f_path.mnt) :
55446+ curr->filename, curr->filename,
55447+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55448+ &current->signal->saved_ip);
55449+ goto exit;
55450+ }
55451+ goto exit_fail;
55452+ }
55453+
55454+inet_check:
55455+ /* the rest of this checking is for IPv4 only */
55456+ if (!curr->ips)
55457+ goto exit;
55458+
55459+ if ((curr->ip_type & (1 << type)) &&
55460+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55461+ goto exit;
55462+
55463+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55464+		/* we don't place acls on raw sockets, and sometimes
55465+ dgram/ip sockets are opened for ioctl and not
55466+ bind/connect, so we'll fake a bind learn log */
55467+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55468+ __u32 fakeip = 0;
55469+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55470+ current->role->roletype, cred->uid,
55471+ cred->gid, current->exec_file ?
55472+ gr_to_filename(current->exec_file->f_path.dentry,
55473+ current->exec_file->f_path.mnt) :
55474+ curr->filename, curr->filename,
55475+ &fakeip, 0, type,
55476+ protocol, GR_CONNECT, &current->signal->saved_ip);
55477+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55478+ __u32 fakeip = 0;
55479+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55480+ current->role->roletype, cred->uid,
55481+ cred->gid, current->exec_file ?
55482+ gr_to_filename(current->exec_file->f_path.dentry,
55483+ current->exec_file->f_path.mnt) :
55484+ curr->filename, curr->filename,
55485+ &fakeip, 0, type,
55486+ protocol, GR_BIND, &current->signal->saved_ip);
55487+ }
55488+ /* we'll log when they use connect or bind */
55489+ goto exit;
55490+ }
55491+
55492+exit_fail:
55493+ if (domain == PF_INET)
55494+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55495+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55496+ else
55497+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55498+ gr_socktype_to_name(type), protocol);
55499+
55500+ return 0;
55501+exit:
55502+ return 1;
55503+}
55504+
55505+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55506+{
55507+ if ((ip->mode & mode) &&
55508+ (ip_port >= ip->low) &&
55509+ (ip_port <= ip->high) &&
55510+ ((ntohl(ip_addr) & our_netmask) ==
55511+ (ntohl(our_addr) & our_netmask))
55512+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55513+ && (ip->type & (1 << type))) {
55514+ if (ip->mode & GR_INVERT)
55515+ return 2; // specifically denied
55516+ else
55517+ return 1; // allowed
55518+ }
55519+
55520+ return 0; // not specifically allowed, may continue parsing
55521+}
55522+
55523+static int
55524+gr_search_connectbind(const int full_mode, struct sock *sk,
55525+ struct sockaddr_in *addr, const int type)
55526+{
55527+ char iface[IFNAMSIZ] = {0};
55528+ struct acl_subject_label *curr;
55529+ struct acl_ip_label *ip;
55530+ struct inet_sock *isk;
55531+ struct net_device *dev;
55532+ struct in_device *idev;
55533+ unsigned long i;
55534+ int ret;
55535+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55536+ __u32 ip_addr = 0;
55537+ __u32 our_addr;
55538+ __u32 our_netmask;
55539+ char *p;
55540+ __u16 ip_port = 0;
55541+ const struct cred *cred = current_cred();
55542+
55543+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55544+ return 0;
55545+
55546+ curr = current->acl;
55547+ isk = inet_sk(sk);
55548+
55549+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55550+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55551+ addr->sin_addr.s_addr = curr->inaddr_any_override;
55552+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55553+ struct sockaddr_in saddr;
55554+ int err;
55555+
55556+ saddr.sin_family = AF_INET;
55557+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
55558+ saddr.sin_port = isk->sport;
55559+
55560+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55561+ if (err)
55562+ return err;
55563+
55564+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55565+ if (err)
55566+ return err;
55567+ }
55568+
55569+ if (!curr->ips)
55570+ return 0;
55571+
55572+ ip_addr = addr->sin_addr.s_addr;
55573+ ip_port = ntohs(addr->sin_port);
55574+
55575+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55576+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55577+ current->role->roletype, cred->uid,
55578+ cred->gid, current->exec_file ?
55579+ gr_to_filename(current->exec_file->f_path.dentry,
55580+ current->exec_file->f_path.mnt) :
55581+ curr->filename, curr->filename,
55582+ &ip_addr, ip_port, type,
55583+ sk->sk_protocol, mode, &current->signal->saved_ip);
55584+ return 0;
55585+ }
55586+
55587+ for (i = 0; i < curr->ip_num; i++) {
55588+ ip = *(curr->ips + i);
55589+ if (ip->iface != NULL) {
55590+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55591+ p = strchr(iface, ':');
55592+ if (p != NULL)
55593+ *p = '\0';
55594+ dev = dev_get_by_name(sock_net(sk), iface);
55595+ if (dev == NULL)
55596+ continue;
55597+ idev = in_dev_get(dev);
55598+ if (idev == NULL) {
55599+ dev_put(dev);
55600+ continue;
55601+ }
55602+ rcu_read_lock();
55603+ for_ifa(idev) {
55604+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55605+ our_addr = ifa->ifa_address;
55606+ our_netmask = 0xffffffff;
55607+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55608+ if (ret == 1) {
55609+ rcu_read_unlock();
55610+ in_dev_put(idev);
55611+ dev_put(dev);
55612+ return 0;
55613+ } else if (ret == 2) {
55614+ rcu_read_unlock();
55615+ in_dev_put(idev);
55616+ dev_put(dev);
55617+ goto denied;
55618+ }
55619+ }
55620+ } endfor_ifa(idev);
55621+ rcu_read_unlock();
55622+ in_dev_put(idev);
55623+ dev_put(dev);
55624+ } else {
55625+ our_addr = ip->addr;
55626+ our_netmask = ip->netmask;
55627+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55628+ if (ret == 1)
55629+ return 0;
55630+ else if (ret == 2)
55631+ goto denied;
55632+ }
55633+ }
55634+
55635+denied:
55636+ if (mode == GR_BIND)
55637+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55638+ else if (mode == GR_CONNECT)
55639+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55640+
55641+ return -EACCES;
55642+}
55643+
55644+int
55645+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55646+{
55647+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55648+}
55649+
55650+int
55651+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55652+{
55653+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55654+}
55655+
55656+int gr_search_listen(struct socket *sock)
55657+{
55658+ struct sock *sk = sock->sk;
55659+ struct sockaddr_in addr;
55660+
55661+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
55662+ addr.sin_port = inet_sk(sk)->sport;
55663+
55664+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55665+}
55666+
55667+int gr_search_accept(struct socket *sock)
55668+{
55669+ struct sock *sk = sock->sk;
55670+ struct sockaddr_in addr;
55671+
55672+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
55673+ addr.sin_port = inet_sk(sk)->sport;
55674+
55675+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55676+}
55677+
55678+int
55679+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55680+{
55681+ if (addr)
55682+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55683+ else {
55684+ struct sockaddr_in sin;
55685+ const struct inet_sock *inet = inet_sk(sk);
55686+
55687+ sin.sin_addr.s_addr = inet->daddr;
55688+ sin.sin_port = inet->dport;
55689+
55690+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55691+ }
55692+}
55693+
55694+int
55695+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55696+{
55697+ struct sockaddr_in sin;
55698+
55699+ if (unlikely(skb->len < sizeof (struct udphdr)))
55700+ return 0; // skip this packet
55701+
55702+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55703+ sin.sin_port = udp_hdr(skb)->source;
55704+
55705+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55706+}
55707diff -urNp linux-2.6.32.48/grsecurity/gracl_learn.c linux-2.6.32.48/grsecurity/gracl_learn.c
55708--- linux-2.6.32.48/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
55709+++ linux-2.6.32.48/grsecurity/gracl_learn.c 2011-11-15 19:59:43.000000000 -0500
55710@@ -0,0 +1,208 @@
55711+#include <linux/kernel.h>
55712+#include <linux/mm.h>
55713+#include <linux/sched.h>
55714+#include <linux/poll.h>
55715+#include <linux/smp_lock.h>
55716+#include <linux/string.h>
55717+#include <linux/file.h>
55718+#include <linux/types.h>
55719+#include <linux/vmalloc.h>
55720+#include <linux/grinternal.h>
55721+
55722+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55723+ size_t count, loff_t *ppos);
55724+extern int gr_acl_is_enabled(void);
55725+
55726+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55727+static int gr_learn_attached;
55728+
55729+/* use a 512k buffer */
55730+#define LEARN_BUFFER_SIZE (512 * 1024)
55731+
55732+static DEFINE_SPINLOCK(gr_learn_lock);
55733+static DEFINE_MUTEX(gr_learn_user_mutex);
55734+
55735+/* we need to maintain two buffers, so that the kernel context of grlearn
55736+ uses a semaphore around the userspace copying, and the other kernel contexts
55737+ use a spinlock when copying into the buffer, since they cannot sleep
55738+*/
55739+static char *learn_buffer;
55740+static char *learn_buffer_user;
55741+static int learn_buffer_len;
55742+static int learn_buffer_user_len;
55743+
55744+static ssize_t
55745+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55746+{
55747+ DECLARE_WAITQUEUE(wait, current);
55748+ ssize_t retval = 0;
55749+
55750+ add_wait_queue(&learn_wait, &wait);
55751+ set_current_state(TASK_INTERRUPTIBLE);
55752+ do {
55753+ mutex_lock(&gr_learn_user_mutex);
55754+ spin_lock(&gr_learn_lock);
55755+ if (learn_buffer_len)
55756+ break;
55757+ spin_unlock(&gr_learn_lock);
55758+ mutex_unlock(&gr_learn_user_mutex);
55759+ if (file->f_flags & O_NONBLOCK) {
55760+ retval = -EAGAIN;
55761+ goto out;
55762+ }
55763+ if (signal_pending(current)) {
55764+ retval = -ERESTARTSYS;
55765+ goto out;
55766+ }
55767+
55768+ schedule();
55769+ } while (1);
55770+
55771+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55772+ learn_buffer_user_len = learn_buffer_len;
55773+ retval = learn_buffer_len;
55774+ learn_buffer_len = 0;
55775+
55776+ spin_unlock(&gr_learn_lock);
55777+
55778+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55779+ retval = -EFAULT;
55780+
55781+ mutex_unlock(&gr_learn_user_mutex);
55782+out:
55783+ set_current_state(TASK_RUNNING);
55784+ remove_wait_queue(&learn_wait, &wait);
55785+ return retval;
55786+}
55787+
55788+static unsigned int
55789+poll_learn(struct file * file, poll_table * wait)
55790+{
55791+ poll_wait(file, &learn_wait, wait);
55792+
55793+ if (learn_buffer_len)
55794+ return (POLLIN | POLLRDNORM);
55795+
55796+ return 0;
55797+}
55798+
55799+void
55800+gr_clear_learn_entries(void)
55801+{
55802+ char *tmp;
55803+
55804+ mutex_lock(&gr_learn_user_mutex);
55805+ spin_lock(&gr_learn_lock);
55806+ tmp = learn_buffer;
55807+ learn_buffer = NULL;
55808+ spin_unlock(&gr_learn_lock);
55809+ if (tmp)
55810+ vfree(tmp);
55811+ if (learn_buffer_user != NULL) {
55812+ vfree(learn_buffer_user);
55813+ learn_buffer_user = NULL;
55814+ }
55815+ learn_buffer_len = 0;
55816+ mutex_unlock(&gr_learn_user_mutex);
55817+
55818+ return;
55819+}
55820+
55821+void
55822+gr_add_learn_entry(const char *fmt, ...)
55823+{
55824+ va_list args;
55825+ unsigned int len;
55826+
55827+ if (!gr_learn_attached)
55828+ return;
55829+
55830+ spin_lock(&gr_learn_lock);
55831+
55832+ /* leave a gap at the end so we know when it's "full" but don't have to
55833+ compute the exact length of the string we're trying to append
55834+ */
55835+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55836+ spin_unlock(&gr_learn_lock);
55837+ wake_up_interruptible(&learn_wait);
55838+ return;
55839+ }
55840+ if (learn_buffer == NULL) {
55841+ spin_unlock(&gr_learn_lock);
55842+ return;
55843+ }
55844+
55845+ va_start(args, fmt);
55846+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55847+ va_end(args);
55848+
55849+ learn_buffer_len += len + 1;
55850+
55851+ spin_unlock(&gr_learn_lock);
55852+ wake_up_interruptible(&learn_wait);
55853+
55854+ return;
55855+}
55856+
55857+static int
55858+open_learn(struct inode *inode, struct file *file)
55859+{
55860+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55861+ return -EBUSY;
55862+ if (file->f_mode & FMODE_READ) {
55863+ int retval = 0;
55864+ mutex_lock(&gr_learn_user_mutex);
55865+ if (learn_buffer == NULL)
55866+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55867+ if (learn_buffer_user == NULL)
55868+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55869+ if (learn_buffer == NULL) {
55870+ retval = -ENOMEM;
55871+ goto out_error;
55872+ }
55873+ if (learn_buffer_user == NULL) {
55874+ retval = -ENOMEM;
55875+ goto out_error;
55876+ }
55877+ learn_buffer_len = 0;
55878+ learn_buffer_user_len = 0;
55879+ gr_learn_attached = 1;
55880+out_error:
55881+ mutex_unlock(&gr_learn_user_mutex);
55882+ return retval;
55883+ }
55884+ return 0;
55885+}
55886+
55887+static int
55888+close_learn(struct inode *inode, struct file *file)
55889+{
55890+ if (file->f_mode & FMODE_READ) {
55891+ char *tmp = NULL;
55892+ mutex_lock(&gr_learn_user_mutex);
55893+ spin_lock(&gr_learn_lock);
55894+ tmp = learn_buffer;
55895+ learn_buffer = NULL;
55896+ spin_unlock(&gr_learn_lock);
55897+ if (tmp)
55898+ vfree(tmp);
55899+ if (learn_buffer_user != NULL) {
55900+ vfree(learn_buffer_user);
55901+ learn_buffer_user = NULL;
55902+ }
55903+ learn_buffer_len = 0;
55904+ learn_buffer_user_len = 0;
55905+ gr_learn_attached = 0;
55906+ mutex_unlock(&gr_learn_user_mutex);
55907+ }
55908+
55909+ return 0;
55910+}
55911+
55912+const struct file_operations grsec_fops = {
55913+ .read = read_learn,
55914+ .write = write_grsec_handler,
55915+ .open = open_learn,
55916+ .release = close_learn,
55917+ .poll = poll_learn,
55918+};
55919diff -urNp linux-2.6.32.48/grsecurity/gracl_res.c linux-2.6.32.48/grsecurity/gracl_res.c
55920--- linux-2.6.32.48/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
55921+++ linux-2.6.32.48/grsecurity/gracl_res.c 2011-11-15 19:59:43.000000000 -0500
55922@@ -0,0 +1,67 @@
55923+#include <linux/kernel.h>
55924+#include <linux/sched.h>
55925+#include <linux/gracl.h>
55926+#include <linux/grinternal.h>
55927+
55928+static const char *restab_log[] = {
55929+ [RLIMIT_CPU] = "RLIMIT_CPU",
55930+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55931+ [RLIMIT_DATA] = "RLIMIT_DATA",
55932+ [RLIMIT_STACK] = "RLIMIT_STACK",
55933+ [RLIMIT_CORE] = "RLIMIT_CORE",
55934+ [RLIMIT_RSS] = "RLIMIT_RSS",
55935+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
55936+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55937+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55938+ [RLIMIT_AS] = "RLIMIT_AS",
55939+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55940+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55941+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55942+ [RLIMIT_NICE] = "RLIMIT_NICE",
55943+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55944+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55945+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55946+};
55947+
55948+void
55949+gr_log_resource(const struct task_struct *task,
55950+ const int res, const unsigned long wanted, const int gt)
55951+{
55952+ const struct cred *cred;
55953+ unsigned long rlim;
55954+
55955+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55956+ return;
55957+
55958+ // not yet supported resource
55959+ if (unlikely(!restab_log[res]))
55960+ return;
55961+
55962+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55963+ rlim = task->signal->rlim[res].rlim_max;
55964+ else
55965+ rlim = task->signal->rlim[res].rlim_cur;
55966+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55967+ return;
55968+
55969+ rcu_read_lock();
55970+ cred = __task_cred(task);
55971+
55972+ if (res == RLIMIT_NPROC &&
55973+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55974+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55975+ goto out_rcu_unlock;
55976+ else if (res == RLIMIT_MEMLOCK &&
55977+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55978+ goto out_rcu_unlock;
55979+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55980+ goto out_rcu_unlock;
55981+ rcu_read_unlock();
55982+
55983+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55984+
55985+ return;
55986+out_rcu_unlock:
55987+ rcu_read_unlock();
55988+ return;
55989+}
55990diff -urNp linux-2.6.32.48/grsecurity/gracl_segv.c linux-2.6.32.48/grsecurity/gracl_segv.c
55991--- linux-2.6.32.48/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
55992+++ linux-2.6.32.48/grsecurity/gracl_segv.c 2011-11-15 19:59:43.000000000 -0500
55993@@ -0,0 +1,287 @@
55994+#include <linux/kernel.h>
55995+#include <linux/mm.h>
55996+#include <asm/uaccess.h>
55997+#include <asm/errno.h>
55998+#include <asm/mman.h>
55999+#include <net/sock.h>
56000+#include <linux/file.h>
56001+#include <linux/fs.h>
56002+#include <linux/net.h>
56003+#include <linux/in.h>
56004+#include <linux/smp_lock.h>
56005+#include <linux/slab.h>
56006+#include <linux/types.h>
56007+#include <linux/sched.h>
56008+#include <linux/timer.h>
56009+#include <linux/gracl.h>
56010+#include <linux/grsecurity.h>
56011+#include <linux/grinternal.h>
56012+
56013+static struct crash_uid *uid_set;
56014+static unsigned short uid_used;
56015+static DEFINE_SPINLOCK(gr_uid_lock);
56016+extern rwlock_t gr_inode_lock;
56017+extern struct acl_subject_label *
56018+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56019+ struct acl_role_label *role);
56020+extern int gr_fake_force_sig(int sig, struct task_struct *t);
56021+
56022+int
56023+gr_init_uidset(void)
56024+{
56025+ uid_set =
56026+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56027+ uid_used = 0;
56028+
56029+ return uid_set ? 1 : 0;
56030+}
56031+
56032+void
56033+gr_free_uidset(void)
56034+{
56035+ if (uid_set)
56036+ kfree(uid_set);
56037+
56038+ return;
56039+}
56040+
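+/* binary search of the uid-sorted uid_set table; returns the entry index or -1 if uid is absent */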
56041+int
56042+gr_find_uid(const uid_t uid)
56043+{
56044+ struct crash_uid *tmp = uid_set;
56045+ uid_t buid;
56046+ int low = 0, high = uid_used - 1, mid;
56047+
56048+ while (high >= low) {
56049+ mid = (low + high) >> 1;
56050+ buid = tmp[mid].uid;
56051+ if (buid == uid)
56052+ return mid;
56053+ if (buid > uid)
56054+ high = mid - 1;
56055+ if (buid < uid)
56056+ low = mid + 1;
56057+ }
56058+
56059+ return -1;
56060+}
56061+
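+/* restore ascending uid order after a new entry is appended at the end of uid_set */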
56062+static __inline__ void
56063+gr_insertsort(void)
56064+{
56065+ unsigned short i, j;
56066+ struct crash_uid index;
56067+
56068+ for (i = 1; i < uid_used; i++) {
56069+ index = uid_set[i];
56070+ j = i;
56071+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56072+ uid_set[j] = uid_set[j - 1];
56073+ j--;
56074+ }
56075+ uid_set[j] = index;
56076+ }
56077+
56078+ return;
56079+}
56080+
56081+static __inline__ void
56082+gr_insert_uid(const uid_t uid, const unsigned long expires)
56083+{
56084+ int loc;
56085+
56086+ if (uid_used == GR_UIDTABLE_MAX)
56087+ return;
56088+
56089+ loc = gr_find_uid(uid);
56090+
56091+ if (loc >= 0) {
56092+ uid_set[loc].expires = expires;
56093+ return;
56094+ }
56095+
56096+ uid_set[uid_used].uid = uid;
56097+ uid_set[uid_used].expires = expires;
56098+ uid_used++;
56099+
56100+ gr_insertsort();
56101+
56102+ return;
56103+}
56104+
56105+void
56106+gr_remove_uid(const unsigned short loc)
56107+{
56108+ unsigned short i;
56109+
56110+ for (i = loc + 1; i < uid_used; i++)
56111+ uid_set[i - 1] = uid_set[i];
56112+
56113+ uid_used--;
56114+
56115+ return;
56116+}
56117+
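+/* returns 1 while uid is still listed in the crash table with an unexpired ban */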
56118+int
56119+gr_check_crash_uid(const uid_t uid)
56120+{
56121+ int loc;
56122+ int ret = 0;
56123+
56124+ if (unlikely(!gr_acl_is_enabled()))
56125+ return 0;
56126+
56127+ spin_lock(&gr_uid_lock);
56128+ loc = gr_find_uid(uid);
56129+
56130+ if (loc < 0)
56131+ goto out_unlock;
56132+
56133+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56134+ gr_remove_uid(loc);
56135+ else
56136+ ret = 1;
56137+
56138+out_unlock:
56139+ spin_unlock(&gr_uid_lock);
56140+ return ret;
56141+}
56142+
56143+static __inline__ int
56144+proc_is_setxid(const struct cred *cred)
56145+{
56146+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
56147+ cred->uid != cred->fsuid)
56148+ return 1;
56149+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56150+ cred->gid != cred->fsgid)
56151+ return 1;
56152+
56153+ return 0;
56154+}
56155+
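+/* on a fatal signal, enforce RES_CRASH: for setxid programs ban the (non-root) uid and
+   kill its tasks, otherwise kill every task running the same subject and binary */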
56156+void
56157+gr_handle_crash(struct task_struct *task, const int sig)
56158+{
56159+ struct acl_subject_label *curr;
56160+ struct acl_subject_label *curr2;
56161+ struct task_struct *tsk, *tsk2;
56162+ const struct cred *cred;
56163+ const struct cred *cred2;
56164+
56165+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56166+ return;
56167+
56168+ if (unlikely(!gr_acl_is_enabled()))
56169+ return;
56170+
56171+ curr = task->acl;
56172+
56173+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56174+ return;
56175+
56176+ if (time_before_eq(curr->expires, get_seconds())) {
56177+ curr->expires = 0;
56178+ curr->crashes = 0;
56179+ }
56180+
56181+ curr->crashes++;
56182+
56183+ if (!curr->expires)
56184+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56185+
56186+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56187+ time_after(curr->expires, get_seconds())) {
56188+ rcu_read_lock();
56189+ cred = __task_cred(task);
56190+ if (cred->uid && proc_is_setxid(cred)) {
56191+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56192+ spin_lock(&gr_uid_lock);
56193+ gr_insert_uid(cred->uid, curr->expires);
56194+ spin_unlock(&gr_uid_lock);
56195+ curr->expires = 0;
56196+ curr->crashes = 0;
56197+ read_lock(&tasklist_lock);
56198+ do_each_thread(tsk2, tsk) {
56199+ cred2 = __task_cred(tsk);
56200+ if (tsk != task && cred2->uid == cred->uid)
56201+ gr_fake_force_sig(SIGKILL, tsk);
56202+ } while_each_thread(tsk2, tsk);
56203+ read_unlock(&tasklist_lock);
56204+ } else {
56205+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56206+ read_lock(&tasklist_lock);
56207+ read_lock(&grsec_exec_file_lock);
56208+ do_each_thread(tsk2, tsk) {
56209+ if (likely(tsk != task)) {
56210+ curr2 = tsk->acl;
56211+
56212+ // if this thread has the same subject as the one that triggered
56213+ // RES_CRASH and it's the same binary, kill it
56214+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56215+ gr_fake_force_sig(SIGKILL, tsk);
56216+ }
56217+ } while_each_thread(tsk2, tsk);
56218+ read_unlock(&grsec_exec_file_lock);
56219+ read_unlock(&tasklist_lock);
56220+ }
56221+ rcu_read_unlock();
56222+ }
56223+
56224+ return;
56225+}
56226+
56227+int
56228+gr_check_crash_exec(const struct file *filp)
56229+{
56230+ struct acl_subject_label *curr;
56231+
56232+ if (unlikely(!gr_acl_is_enabled()))
56233+ return 0;
56234+
56235+ read_lock(&gr_inode_lock);
56236+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56237+ filp->f_path.dentry->d_inode->i_sb->s_dev,
56238+ current->role);
56239+ read_unlock(&gr_inode_lock);
56240+
56241+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56242+ (!curr->crashes && !curr->expires))
56243+ return 0;
56244+
56245+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56246+ time_after(curr->expires, get_seconds()))
56247+ return 1;
56248+ else if (time_before_eq(curr->expires, get_seconds())) {
56249+ curr->crashes = 0;
56250+ curr->expires = 0;
56251+ }
56252+
56253+ return 0;
56254+}
56255+
56256+void
56257+gr_handle_alertkill(struct task_struct *task)
56258+{
56259+ struct acl_subject_label *curracl;
56260+ __u32 curr_ip;
56261+ struct task_struct *p, *p2;
56262+
56263+ if (unlikely(!gr_acl_is_enabled()))
56264+ return;
56265+
56266+ curracl = task->acl;
56267+ curr_ip = task->signal->curr_ip;
56268+
56269+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56270+ read_lock(&tasklist_lock);
56271+ do_each_thread(p2, p) {
56272+ if (p->signal->curr_ip == curr_ip)
56273+ gr_fake_force_sig(SIGKILL, p);
56274+ } while_each_thread(p2, p);
56275+ read_unlock(&tasklist_lock);
56276+ } else if (curracl->mode & GR_KILLPROC)
56277+ gr_fake_force_sig(SIGKILL, task);
56278+
56279+ return;
56280+}
56281diff -urNp linux-2.6.32.48/grsecurity/gracl_shm.c linux-2.6.32.48/grsecurity/gracl_shm.c
56282--- linux-2.6.32.48/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
56283+++ linux-2.6.32.48/grsecurity/gracl_shm.c 2011-11-15 19:59:43.000000000 -0500
56284@@ -0,0 +1,40 @@
56285+#include <linux/kernel.h>
56286+#include <linux/mm.h>
56287+#include <linux/sched.h>
56288+#include <linux/file.h>
56289+#include <linux/ipc.h>
56290+#include <linux/gracl.h>
56291+#include <linux/grsecurity.h>
56292+#include <linux/grinternal.h>
56293+
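+/* deny (return 0) when the segment's creator or last attacher still runs under a
+   GR_PROTSHM subject different from the caller's */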
56294+int
56295+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56296+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56297+{
56298+ struct task_struct *task;
56299+
56300+ if (!gr_acl_is_enabled())
56301+ return 1;
56302+
56303+ rcu_read_lock();
56304+ read_lock(&tasklist_lock);
56305+
56306+ task = find_task_by_vpid(shm_cprid);
56307+
56308+ if (unlikely(!task))
56309+ task = find_task_by_vpid(shm_lapid);
56310+
56311+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56312+ (task->pid == shm_lapid)) &&
56313+ (task->acl->mode & GR_PROTSHM) &&
56314+ (task->acl != current->acl))) {
56315+ read_unlock(&tasklist_lock);
56316+ rcu_read_unlock();
56317+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56318+ return 0;
56319+ }
56320+ read_unlock(&tasklist_lock);
56321+ rcu_read_unlock();
56322+
56323+ return 1;
56324+}
56325diff -urNp linux-2.6.32.48/grsecurity/grsec_chdir.c linux-2.6.32.48/grsecurity/grsec_chdir.c
56326--- linux-2.6.32.48/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
56327+++ linux-2.6.32.48/grsecurity/grsec_chdir.c 2011-11-15 19:59:43.000000000 -0500
56328@@ -0,0 +1,19 @@
56329+#include <linux/kernel.h>
56330+#include <linux/sched.h>
56331+#include <linux/fs.h>
56332+#include <linux/file.h>
56333+#include <linux/grsecurity.h>
56334+#include <linux/grinternal.h>
56335+
56336+void
56337+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56338+{
56339+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56340+ if ((grsec_enable_chdir && grsec_enable_group &&
56341+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56342+ !grsec_enable_group)) {
56343+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56344+ }
56345+#endif
56346+ return;
56347+}
56348diff -urNp linux-2.6.32.48/grsecurity/grsec_chroot.c linux-2.6.32.48/grsecurity/grsec_chroot.c
56349--- linux-2.6.32.48/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
56350+++ linux-2.6.32.48/grsecurity/grsec_chroot.c 2011-11-15 19:59:43.000000000 -0500
56351@@ -0,0 +1,386 @@
56352+#include <linux/kernel.h>
56353+#include <linux/module.h>
56354+#include <linux/sched.h>
56355+#include <linux/file.h>
56356+#include <linux/fs.h>
56357+#include <linux/mount.h>
56358+#include <linux/types.h>
56359+#include <linux/pid_namespace.h>
56360+#include <linux/grsecurity.h>
56361+#include <linux/grinternal.h>
56362+
56363+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56364+{
56365+#ifdef CONFIG_GRKERNSEC
56366+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56367+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
56368+ task->gr_is_chrooted = 1;
56369+ else
56370+ task->gr_is_chrooted = 0;
56371+
56372+ task->gr_chroot_dentry = path->dentry;
56373+#endif
56374+ return;
56375+}
56376+
56377+void gr_clear_chroot_entries(struct task_struct *task)
56378+{
56379+#ifdef CONFIG_GRKERNSEC
56380+ task->gr_is_chrooted = 0;
56381+ task->gr_chroot_dentry = NULL;
56382+#endif
56383+ return;
56384+}
56385+
56386+int
56387+gr_handle_chroot_unix(const pid_t pid)
56388+{
56389+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56390+ struct task_struct *p;
56391+
56392+ if (unlikely(!grsec_enable_chroot_unix))
56393+ return 1;
56394+
56395+ if (likely(!proc_is_chrooted(current)))
56396+ return 1;
56397+
56398+ rcu_read_lock();
56399+ read_lock(&tasklist_lock);
56400+
56401+ p = find_task_by_vpid_unrestricted(pid);
56402+ if (unlikely(p && !have_same_root(current, p))) {
56403+ read_unlock(&tasklist_lock);
56404+ rcu_read_unlock();
56405+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56406+ return 0;
56407+ }
56408+ read_unlock(&tasklist_lock);
56409+ rcu_read_unlock();
56410+#endif
56411+ return 1;
56412+}
56413+
56414+int
56415+gr_handle_chroot_nice(void)
56416+{
56417+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56418+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56419+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56420+ return -EPERM;
56421+ }
56422+#endif
56423+ return 0;
56424+}
56425+
56426+int
56427+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56428+{
56429+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56430+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56431+ && proc_is_chrooted(current)) {
56432+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56433+ return -EACCES;
56434+ }
56435+#endif
56436+ return 0;
56437+}
56438+
56439+int
56440+gr_handle_chroot_rawio(const struct inode *inode)
56441+{
56442+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56443+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56444+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56445+ return 1;
56446+#endif
56447+ return 0;
56448+}
56449+
56450+int
56451+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56452+{
56453+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56454+ struct task_struct *p;
56455+ int ret = 0;
56456+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56457+ return ret;
56458+
56459+ read_lock(&tasklist_lock);
56460+ do_each_pid_task(pid, type, p) {
56461+ if (!have_same_root(current, p)) {
56462+ ret = 1;
56463+ goto out;
56464+ }
56465+ } while_each_pid_task(pid, type, p);
56466+out:
56467+ read_unlock(&tasklist_lock);
56468+ return ret;
56469+#endif
56470+ return 0;
56471+}
56472+
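+/* non-zero means task p should be invisible to the chrooted caller: it is exiting
+   or does not share the caller's root */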
56473+int
56474+gr_pid_is_chrooted(struct task_struct *p)
56475+{
56476+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56477+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56478+ return 0;
56479+
56480+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56481+ !have_same_root(current, p)) {
56482+ return 1;
56483+ }
56484+#endif
56485+ return 0;
56486+}
56487+
56488+EXPORT_SYMBOL(gr_pid_is_chrooted);
56489+
56490+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56491+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56492+{
56493+ struct dentry *dentry = (struct dentry *)u_dentry;
56494+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
56495+ struct dentry *realroot;
56496+ struct vfsmount *realrootmnt;
56497+ struct dentry *currentroot;
56498+ struct vfsmount *currentmnt;
56499+ struct task_struct *reaper = &init_task;
56500+ int ret = 1;
56501+
56502+ read_lock(&reaper->fs->lock);
56503+ realrootmnt = mntget(reaper->fs->root.mnt);
56504+ realroot = dget(reaper->fs->root.dentry);
56505+ read_unlock(&reaper->fs->lock);
56506+
56507+ read_lock(&current->fs->lock);
56508+ currentmnt = mntget(current->fs->root.mnt);
56509+ currentroot = dget(current->fs->root.dentry);
56510+ read_unlock(&current->fs->lock);
56511+
56512+ spin_lock(&dcache_lock);
56513+ for (;;) {
56514+ if (unlikely((dentry == realroot && mnt == realrootmnt)
56515+ || (dentry == currentroot && mnt == currentmnt)))
56516+ break;
56517+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
56518+ if (mnt->mnt_parent == mnt)
56519+ break;
56520+ dentry = mnt->mnt_mountpoint;
56521+ mnt = mnt->mnt_parent;
56522+ continue;
56523+ }
56524+ dentry = dentry->d_parent;
56525+ }
56526+ spin_unlock(&dcache_lock);
56527+
56528+ dput(currentroot);
56529+ mntput(currentmnt);
56530+
56531+ /* access is outside of chroot */
56532+ if (dentry == realroot && mnt == realrootmnt)
56533+ ret = 0;
56534+
56535+ dput(realroot);
56536+ mntput(realrootmnt);
56537+ return ret;
56538+}
56539+#endif
56540+
56541+int
56542+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56543+{
56544+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56545+ if (!grsec_enable_chroot_fchdir)
56546+ return 1;
56547+
56548+ if (!proc_is_chrooted(current))
56549+ return 1;
56550+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56551+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56552+ return 0;
56553+ }
56554+#endif
56555+ return 1;
56556+}
56557+
56558+int
56559+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56560+ const time_t shm_createtime)
56561+{
56562+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56563+ struct task_struct *p;
56564+ time_t starttime;
56565+
56566+ if (unlikely(!grsec_enable_chroot_shmat))
56567+ return 1;
56568+
56569+ if (likely(!proc_is_chrooted(current)))
56570+ return 1;
56571+
56572+ rcu_read_lock();
56573+ read_lock(&tasklist_lock);
56574+
56575+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56576+ starttime = p->start_time.tv_sec;
56577+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56578+ if (have_same_root(current, p)) {
56579+ goto allow;
56580+ } else {
56581+ read_unlock(&tasklist_lock);
56582+ rcu_read_unlock();
56583+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56584+ return 0;
56585+ }
56586+ }
56587+ /* creator exited, pid reuse, fall through to next check */
56588+ }
56589+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56590+ if (unlikely(!have_same_root(current, p))) {
56591+ read_unlock(&tasklist_lock);
56592+ rcu_read_unlock();
56593+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56594+ return 0;
56595+ }
56596+ }
56597+
56598+allow:
56599+ read_unlock(&tasklist_lock);
56600+ rcu_read_unlock();
56601+#endif
56602+ return 1;
56603+}
56604+
56605+void
56606+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56607+{
56608+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56609+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56610+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56611+#endif
56612+ return;
56613+}
56614+
56615+int
56616+gr_handle_chroot_mknod(const struct dentry *dentry,
56617+ const struct vfsmount *mnt, const int mode)
56618+{
56619+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56620+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56621+ proc_is_chrooted(current)) {
56622+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56623+ return -EPERM;
56624+ }
56625+#endif
56626+ return 0;
56627+}
56628+
56629+int
56630+gr_handle_chroot_mount(const struct dentry *dentry,
56631+ const struct vfsmount *mnt, const char *dev_name)
56632+{
56633+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56634+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56635+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
56636+ return -EPERM;
56637+ }
56638+#endif
56639+ return 0;
56640+}
56641+
56642+int
56643+gr_handle_chroot_pivot(void)
56644+{
56645+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56646+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56647+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56648+ return -EPERM;
56649+ }
56650+#endif
56651+ return 0;
56652+}
56653+
56654+int
56655+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56656+{
56657+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56658+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56659+ !gr_is_outside_chroot(dentry, mnt)) {
56660+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56661+ return -EPERM;
56662+ }
56663+#endif
56664+ return 0;
56665+}
56666+
56667+extern const char *captab_log[];
56668+extern int captab_log_entries;
56669+
56670+int
56671+gr_chroot_is_capable(const int cap)
56672+{
56673+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56674+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56675+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56676+ if (cap_raised(chroot_caps, cap)) {
56677+ const struct cred *creds = current_cred();
56678+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56679+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56680+ }
56681+ return 0;
56682+ }
56683+ }
56684+#endif
56685+ return 1;
56686+}
56687+
56688+int
56689+gr_chroot_is_capable_nolog(const int cap)
56690+{
56691+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56692+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56693+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56694+ if (cap_raised(chroot_caps, cap)) {
56695+ return 0;
56696+ }
56697+ }
56698+#endif
56699+ return 1;
56700+}
56701+
56702+int
56703+gr_handle_chroot_sysctl(const int op)
56704+{
56705+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56706+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
56707+ && (op & MAY_WRITE))
56708+ return -EACCES;
56709+#endif
56710+ return 0;
56711+}
56712+
56713+void
56714+gr_handle_chroot_chdir(struct path *path)
56715+{
56716+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56717+ if (grsec_enable_chroot_chdir)
56718+ set_fs_pwd(current->fs, path);
56719+#endif
56720+ return;
56721+}
56722+
56723+int
56724+gr_handle_chroot_chmod(const struct dentry *dentry,
56725+ const struct vfsmount *mnt, const int mode)
56726+{
56727+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56728+ /* allow chmod +s on directories, but not on files */
56729+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56730+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56731+ proc_is_chrooted(current)) {
56732+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56733+ return -EPERM;
56734+ }
56735+#endif
56736+ return 0;
56737+}
56738diff -urNp linux-2.6.32.48/grsecurity/grsec_disabled.c linux-2.6.32.48/grsecurity/grsec_disabled.c
56739--- linux-2.6.32.48/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
56740+++ linux-2.6.32.48/grsecurity/grsec_disabled.c 2011-11-15 19:59:43.000000000 -0500
56741@@ -0,0 +1,439 @@
56742+#include <linux/kernel.h>
56743+#include <linux/module.h>
56744+#include <linux/sched.h>
56745+#include <linux/file.h>
56746+#include <linux/fs.h>
56747+#include <linux/kdev_t.h>
56748+#include <linux/net.h>
56749+#include <linux/in.h>
56750+#include <linux/ip.h>
56751+#include <linux/skbuff.h>
56752+#include <linux/sysctl.h>
56753+
56754+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56755+void
56756+pax_set_initial_flags(struct linux_binprm *bprm)
56757+{
56758+ return;
56759+}
56760+#endif
56761+
56762+#ifdef CONFIG_SYSCTL
56763+__u32
56764+gr_handle_sysctl(const struct ctl_table * table, const int op)
56765+{
56766+ return 0;
56767+}
56768+#endif
56769+
56770+#ifdef CONFIG_TASKSTATS
56771+int gr_is_taskstats_denied(int pid)
56772+{
56773+ return 0;
56774+}
56775+#endif
56776+
56777+int
56778+gr_acl_is_enabled(void)
56779+{
56780+ return 0;
56781+}
56782+
56783+void
56784+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56785+{
56786+ return;
56787+}
56788+
56789+int
56790+gr_handle_rawio(const struct inode *inode)
56791+{
56792+ return 0;
56793+}
56794+
56795+void
56796+gr_acl_handle_psacct(struct task_struct *task, const long code)
56797+{
56798+ return;
56799+}
56800+
56801+int
56802+gr_handle_ptrace(struct task_struct *task, const long request)
56803+{
56804+ return 0;
56805+}
56806+
56807+int
56808+gr_handle_proc_ptrace(struct task_struct *task)
56809+{
56810+ return 0;
56811+}
56812+
56813+void
56814+gr_learn_resource(const struct task_struct *task,
56815+ const int res, const unsigned long wanted, const int gt)
56816+{
56817+ return;
56818+}
56819+
56820+int
56821+gr_set_acls(const int type)
56822+{
56823+ return 0;
56824+}
56825+
56826+int
56827+gr_check_hidden_task(const struct task_struct *tsk)
56828+{
56829+ return 0;
56830+}
56831+
56832+int
56833+gr_check_protected_task(const struct task_struct *task)
56834+{
56835+ return 0;
56836+}
56837+
56838+int
56839+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56840+{
56841+ return 0;
56842+}
56843+
56844+void
56845+gr_copy_label(struct task_struct *tsk)
56846+{
56847+ return;
56848+}
56849+
56850+void
56851+gr_set_pax_flags(struct task_struct *task)
56852+{
56853+ return;
56854+}
56855+
56856+int
56857+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56858+ const int unsafe_share)
56859+{
56860+ return 0;
56861+}
56862+
56863+void
56864+gr_handle_delete(const ino_t ino, const dev_t dev)
56865+{
56866+ return;
56867+}
56868+
56869+void
56870+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56871+{
56872+ return;
56873+}
56874+
56875+void
56876+gr_handle_crash(struct task_struct *task, const int sig)
56877+{
56878+ return;
56879+}
56880+
56881+int
56882+gr_check_crash_exec(const struct file *filp)
56883+{
56884+ return 0;
56885+}
56886+
56887+int
56888+gr_check_crash_uid(const uid_t uid)
56889+{
56890+ return 0;
56891+}
56892+
56893+void
56894+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56895+ struct dentry *old_dentry,
56896+ struct dentry *new_dentry,
56897+ struct vfsmount *mnt, const __u8 replace)
56898+{
56899+ return;
56900+}
56901+
56902+int
56903+gr_search_socket(const int family, const int type, const int protocol)
56904+{
56905+ return 1;
56906+}
56907+
56908+int
56909+gr_search_connectbind(const int mode, const struct socket *sock,
56910+ const struct sockaddr_in *addr)
56911+{
56912+ return 0;
56913+}
56914+
56915+void
56916+gr_handle_alertkill(struct task_struct *task)
56917+{
56918+ return;
56919+}
56920+
56921+__u32
56922+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56923+{
56924+ return 1;
56925+}
56926+
56927+__u32
56928+gr_acl_handle_hidden_file(const struct dentry * dentry,
56929+ const struct vfsmount * mnt)
56930+{
56931+ return 1;
56932+}
56933+
56934+__u32
56935+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56936+ const int fmode)
56937+{
56938+ return 1;
56939+}
56940+
56941+__u32
56942+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56943+{
56944+ return 1;
56945+}
56946+
56947+__u32
56948+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56949+{
56950+ return 1;
56951+}
56952+
56953+int
56954+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56955+ unsigned int *vm_flags)
56956+{
56957+ return 1;
56958+}
56959+
56960+__u32
56961+gr_acl_handle_truncate(const struct dentry * dentry,
56962+ const struct vfsmount * mnt)
56963+{
56964+ return 1;
56965+}
56966+
56967+__u32
56968+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56969+{
56970+ return 1;
56971+}
56972+
56973+__u32
56974+gr_acl_handle_access(const struct dentry * dentry,
56975+ const struct vfsmount * mnt, const int fmode)
56976+{
56977+ return 1;
56978+}
56979+
56980+__u32
56981+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
56982+ mode_t mode)
56983+{
56984+ return 1;
56985+}
56986+
56987+__u32
56988+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56989+ mode_t mode)
56990+{
56991+ return 1;
56992+}
56993+
56994+__u32
56995+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56996+{
56997+ return 1;
56998+}
56999+
57000+__u32
57001+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57002+{
57003+ return 1;
57004+}
57005+
57006+void
57007+grsecurity_init(void)
57008+{
57009+ return;
57010+}
57011+
57012+__u32
57013+gr_acl_handle_mknod(const struct dentry * new_dentry,
57014+ const struct dentry * parent_dentry,
57015+ const struct vfsmount * parent_mnt,
57016+ const int mode)
57017+{
57018+ return 1;
57019+}
57020+
57021+__u32
57022+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57023+ const struct dentry * parent_dentry,
57024+ const struct vfsmount * parent_mnt)
57025+{
57026+ return 1;
57027+}
57028+
57029+__u32
57030+gr_acl_handle_symlink(const struct dentry * new_dentry,
57031+ const struct dentry * parent_dentry,
57032+ const struct vfsmount * parent_mnt, const char *from)
57033+{
57034+ return 1;
57035+}
57036+
57037+__u32
57038+gr_acl_handle_link(const struct dentry * new_dentry,
57039+ const struct dentry * parent_dentry,
57040+ const struct vfsmount * parent_mnt,
57041+ const struct dentry * old_dentry,
57042+ const struct vfsmount * old_mnt, const char *to)
57043+{
57044+ return 1;
57045+}
57046+
57047+int
57048+gr_acl_handle_rename(const struct dentry *new_dentry,
57049+ const struct dentry *parent_dentry,
57050+ const struct vfsmount *parent_mnt,
57051+ const struct dentry *old_dentry,
57052+ const struct inode *old_parent_inode,
57053+ const struct vfsmount *old_mnt, const char *newname)
57054+{
57055+ return 0;
57056+}
57057+
57058+int
57059+gr_acl_handle_filldir(const struct file *file, const char *name,
57060+ const int namelen, const ino_t ino)
57061+{
57062+ return 1;
57063+}
57064+
57065+int
57066+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57067+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57068+{
57069+ return 1;
57070+}
57071+
57072+int
57073+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57074+{
57075+ return 0;
57076+}
57077+
57078+int
57079+gr_search_accept(const struct socket *sock)
57080+{
57081+ return 0;
57082+}
57083+
57084+int
57085+gr_search_listen(const struct socket *sock)
57086+{
57087+ return 0;
57088+}
57089+
57090+int
57091+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57092+{
57093+ return 0;
57094+}
57095+
57096+__u32
57097+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57098+{
57099+ return 1;
57100+}
57101+
57102+__u32
57103+gr_acl_handle_creat(const struct dentry * dentry,
57104+ const struct dentry * p_dentry,
57105+ const struct vfsmount * p_mnt, const int fmode,
57106+ const int imode)
57107+{
57108+ return 1;
57109+}
57110+
57111+void
57112+gr_acl_handle_exit(void)
57113+{
57114+ return;
57115+}
57116+
57117+int
57118+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57119+{
57120+ return 1;
57121+}
57122+
57123+void
57124+gr_set_role_label(const uid_t uid, const gid_t gid)
57125+{
57126+ return;
57127+}
57128+
57129+int
57130+gr_acl_handle_procpidmem(const struct task_struct *task)
57131+{
57132+ return 0;
57133+}
57134+
57135+int
57136+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57137+{
57138+ return 0;
57139+}
57140+
57141+int
57142+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57143+{
57144+ return 0;
57145+}
57146+
57147+void
57148+gr_set_kernel_label(struct task_struct *task)
57149+{
57150+ return;
57151+}
57152+
57153+int
57154+gr_check_user_change(int real, int effective, int fs)
57155+{
57156+ return 0;
57157+}
57158+
57159+int
57160+gr_check_group_change(int real, int effective, int fs)
57161+{
57162+ return 0;
57163+}
57164+
57165+int gr_acl_enable_at_secure(void)
57166+{
57167+ return 0;
57168+}
57169+
57170+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57171+{
57172+ return dentry->d_inode->i_sb->s_dev;
57173+}
57174+
57175+EXPORT_SYMBOL(gr_learn_resource);
57176+EXPORT_SYMBOL(gr_set_kernel_label);
57177+#ifdef CONFIG_SECURITY
57178+EXPORT_SYMBOL(gr_check_user_change);
57179+EXPORT_SYMBOL(gr_check_group_change);
57180+#endif
57181diff -urNp linux-2.6.32.48/grsecurity/grsec_exec.c linux-2.6.32.48/grsecurity/grsec_exec.c
57182--- linux-2.6.32.48/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
57183+++ linux-2.6.32.48/grsecurity/grsec_exec.c 2011-11-15 19:59:43.000000000 -0500
57184@@ -0,0 +1,204 @@
57185+#include <linux/kernel.h>
57186+#include <linux/sched.h>
57187+#include <linux/file.h>
57188+#include <linux/binfmts.h>
57189+#include <linux/smp_lock.h>
57190+#include <linux/fs.h>
57191+#include <linux/types.h>
57192+#include <linux/grdefs.h>
57193+#include <linux/grinternal.h>
57194+#include <linux/capability.h>
57195+#include <linux/compat.h>
57196+#include <linux/module.h>
57197+
57198+#include <asm/uaccess.h>
57199+
57200+#ifdef CONFIG_GRKERNSEC_EXECLOG
57201+static char gr_exec_arg_buf[132];
57202+static DEFINE_MUTEX(gr_exec_arg_mutex);
57203+#endif
57204+
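+/* copy up to 128 bytes of argv from userspace into gr_exec_arg_buf, replace
+   unprintable characters with spaces, and emit an exec audit line */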
57205+void
57206+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
57207+{
57208+#ifdef CONFIG_GRKERNSEC_EXECLOG
57209+ char *grarg = gr_exec_arg_buf;
57210+ unsigned int i, x, execlen = 0;
57211+ char c;
57212+
57213+ if (!((grsec_enable_execlog && grsec_enable_group &&
57214+ in_group_p(grsec_audit_gid))
57215+ || (grsec_enable_execlog && !grsec_enable_group)))
57216+ return;
57217+
57218+ mutex_lock(&gr_exec_arg_mutex);
57219+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57220+
57221+ if (unlikely(argv == NULL))
57222+ goto log;
57223+
57224+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57225+ const char __user *p;
57226+ unsigned int len;
57227+
57228+ if (copy_from_user(&p, argv + i, sizeof(p)))
57229+ goto log;
57230+ if (!p)
57231+ goto log;
57232+ len = strnlen_user(p, 128 - execlen);
57233+ if (len > 128 - execlen)
57234+ len = 128 - execlen;
57235+ else if (len > 0)
57236+ len--;
57237+ if (copy_from_user(grarg + execlen, p, len))
57238+ goto log;
57239+
57240+ /* rewrite unprintable characters */
57241+ for (x = 0; x < len; x++) {
57242+ c = *(grarg + execlen + x);
57243+ if (c < 32 || c > 126)
57244+ *(grarg + execlen + x) = ' ';
57245+ }
57246+
57247+ execlen += len;
57248+ *(grarg + execlen) = ' ';
57249+ *(grarg + execlen + 1) = '\0';
57250+ execlen++;
57251+ }
57252+
57253+ log:
57254+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57255+ bprm->file->f_path.mnt, grarg);
57256+ mutex_unlock(&gr_exec_arg_mutex);
57257+#endif
57258+ return;
57259+}
57260+
57261+#ifdef CONFIG_COMPAT
57262+void
57263+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
57264+{
57265+#ifdef CONFIG_GRKERNSEC_EXECLOG
57266+ char *grarg = gr_exec_arg_buf;
57267+ unsigned int i, x, execlen = 0;
57268+ char c;
57269+
57270+ if (!((grsec_enable_execlog && grsec_enable_group &&
57271+ in_group_p(grsec_audit_gid))
57272+ || (grsec_enable_execlog && !grsec_enable_group)))
57273+ return;
57274+
57275+ mutex_lock(&gr_exec_arg_mutex);
57276+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57277+
57278+ if (unlikely(argv == NULL))
57279+ goto log;
57280+
57281+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57282+ compat_uptr_t p;
57283+ unsigned int len;
57284+
57285+ if (get_user(p, argv + i))
57286+ goto log;
57287+ len = strnlen_user(compat_ptr(p), 128 - execlen);
57288+ if (len > 128 - execlen)
57289+ len = 128 - execlen;
57290+ else if (len > 0)
57291+ len--;
57292+ else
57293+ goto log;
57294+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
57295+ goto log;
57296+
57297+ /* rewrite unprintable characters */
57298+ for (x = 0; x < len; x++) {
57299+ c = *(grarg + execlen + x);
57300+ if (c < 32 || c > 126)
57301+ *(grarg + execlen + x) = ' ';
57302+ }
57303+
57304+ execlen += len;
57305+ *(grarg + execlen) = ' ';
57306+ *(grarg + execlen + 1) = '\0';
57307+ execlen++;
57308+ }
57309+
57310+ log:
57311+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57312+ bprm->file->f_path.mnt, grarg);
57313+ mutex_unlock(&gr_exec_arg_mutex);
57314+#endif
57315+ return;
57316+}
57317+#endif
57318+
57319+#ifdef CONFIG_GRKERNSEC
57320+extern int gr_acl_is_capable(const int cap);
57321+extern int gr_acl_is_capable_nolog(const int cap);
57322+extern int gr_chroot_is_capable(const int cap);
57323+extern int gr_chroot_is_capable_nolog(const int cap);
57324+#endif
57325+
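+/* capability names indexed by capability number, used by the capability-denial log messages */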
57326+const char *captab_log[] = {
57327+ "CAP_CHOWN",
57328+ "CAP_DAC_OVERRIDE",
57329+ "CAP_DAC_READ_SEARCH",
57330+ "CAP_FOWNER",
57331+ "CAP_FSETID",
57332+ "CAP_KILL",
57333+ "CAP_SETGID",
57334+ "CAP_SETUID",
57335+ "CAP_SETPCAP",
57336+ "CAP_LINUX_IMMUTABLE",
57337+ "CAP_NET_BIND_SERVICE",
57338+ "CAP_NET_BROADCAST",
57339+ "CAP_NET_ADMIN",
57340+ "CAP_NET_RAW",
57341+ "CAP_IPC_LOCK",
57342+ "CAP_IPC_OWNER",
57343+ "CAP_SYS_MODULE",
57344+ "CAP_SYS_RAWIO",
57345+ "CAP_SYS_CHROOT",
57346+ "CAP_SYS_PTRACE",
57347+ "CAP_SYS_PACCT",
57348+ "CAP_SYS_ADMIN",
57349+ "CAP_SYS_BOOT",
57350+ "CAP_SYS_NICE",
57351+ "CAP_SYS_RESOURCE",
57352+ "CAP_SYS_TIME",
57353+ "CAP_SYS_TTY_CONFIG",
57354+ "CAP_MKNOD",
57355+ "CAP_LEASE",
57356+ "CAP_AUDIT_WRITE",
57357+ "CAP_AUDIT_CONTROL",
57358+ "CAP_SETFCAP",
57359+ "CAP_MAC_OVERRIDE",
57360+ "CAP_MAC_ADMIN"
57361+};
57362+
57363+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57364+
57365+int gr_is_capable(const int cap)
57366+{
57367+#ifdef CONFIG_GRKERNSEC
57368+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57369+ return 1;
57370+ return 0;
57371+#else
57372+ return 1;
57373+#endif
57374+}
57375+
57376+int gr_is_capable_nolog(const int cap)
57377+{
57378+#ifdef CONFIG_GRKERNSEC
57379+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57380+ return 1;
57381+ return 0;
57382+#else
57383+ return 1;
57384+#endif
57385+}
57386+
57387+EXPORT_SYMBOL(gr_is_capable);
57388+EXPORT_SYMBOL(gr_is_capable_nolog);
57389diff -urNp linux-2.6.32.48/grsecurity/grsec_fifo.c linux-2.6.32.48/grsecurity/grsec_fifo.c
57390--- linux-2.6.32.48/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
57391+++ linux-2.6.32.48/grsecurity/grsec_fifo.c 2011-11-15 19:59:43.000000000 -0500
57392@@ -0,0 +1,24 @@
57393+#include <linux/kernel.h>
57394+#include <linux/sched.h>
57395+#include <linux/fs.h>
57396+#include <linux/file.h>
57397+#include <linux/grinternal.h>
57398+
57399+int
57400+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57401+ const struct dentry *dir, const int flag, const int acc_mode)
57402+{
57403+#ifdef CONFIG_GRKERNSEC_FIFO
57404+ const struct cred *cred = current_cred();
57405+
57406+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57407+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57408+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57409+ (cred->fsuid != dentry->d_inode->i_uid)) {
57410+ if (!inode_permission(dentry->d_inode, acc_mode))
57411+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57412+ return -EACCES;
57413+ }
57414+#endif
57415+ return 0;
57416+}
57417diff -urNp linux-2.6.32.48/grsecurity/grsec_fork.c linux-2.6.32.48/grsecurity/grsec_fork.c
57418--- linux-2.6.32.48/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
57419+++ linux-2.6.32.48/grsecurity/grsec_fork.c 2011-11-15 19:59:43.000000000 -0500
57420@@ -0,0 +1,23 @@
57421+#include <linux/kernel.h>
57422+#include <linux/sched.h>
57423+#include <linux/grsecurity.h>
57424+#include <linux/grinternal.h>
57425+#include <linux/errno.h>
57426+
57427+void
57428+gr_log_forkfail(const int retval)
57429+{
57430+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57431+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57432+ switch (retval) {
57433+ case -EAGAIN:
57434+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57435+ break;
57436+ case -ENOMEM:
57437+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57438+ break;
57439+ }
57440+ }
57441+#endif
57442+ return;
57443+}
57444diff -urNp linux-2.6.32.48/grsecurity/grsec_init.c linux-2.6.32.48/grsecurity/grsec_init.c
57445--- linux-2.6.32.48/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
57446+++ linux-2.6.32.48/grsecurity/grsec_init.c 2011-11-15 19:59:43.000000000 -0500
57447@@ -0,0 +1,270 @@
57448+#include <linux/kernel.h>
57449+#include <linux/sched.h>
57450+#include <linux/mm.h>
57451+#include <linux/smp_lock.h>
57452+#include <linux/gracl.h>
57453+#include <linux/slab.h>
57454+#include <linux/vmalloc.h>
57455+#include <linux/percpu.h>
57456+#include <linux/module.h>
57457+
57458+int grsec_enable_brute;
57459+int grsec_enable_link;
57460+int grsec_enable_dmesg;
57461+int grsec_enable_harden_ptrace;
57462+int grsec_enable_fifo;
57463+int grsec_enable_execlog;
57464+int grsec_enable_signal;
57465+int grsec_enable_forkfail;
57466+int grsec_enable_audit_ptrace;
57467+int grsec_enable_time;
57468+int grsec_enable_audit_textrel;
57469+int grsec_enable_group;
57470+int grsec_audit_gid;
57471+int grsec_enable_chdir;
57472+int grsec_enable_mount;
57473+int grsec_enable_rofs;
57474+int grsec_enable_chroot_findtask;
57475+int grsec_enable_chroot_mount;
57476+int grsec_enable_chroot_shmat;
57477+int grsec_enable_chroot_fchdir;
57478+int grsec_enable_chroot_double;
57479+int grsec_enable_chroot_pivot;
57480+int grsec_enable_chroot_chdir;
57481+int grsec_enable_chroot_chmod;
57482+int grsec_enable_chroot_mknod;
57483+int grsec_enable_chroot_nice;
57484+int grsec_enable_chroot_execlog;
57485+int grsec_enable_chroot_caps;
57486+int grsec_enable_chroot_sysctl;
57487+int grsec_enable_chroot_unix;
57488+int grsec_enable_tpe;
57489+int grsec_tpe_gid;
57490+int grsec_enable_blackhole;
57491+#ifdef CONFIG_IPV6_MODULE
57492+EXPORT_SYMBOL(grsec_enable_blackhole);
57493+#endif
57494+int grsec_lastack_retries;
57495+int grsec_enable_tpe_all;
57496+int grsec_enable_tpe_invert;
57497+int grsec_enable_socket_all;
57498+int grsec_socket_all_gid;
57499+int grsec_enable_socket_client;
57500+int grsec_socket_client_gid;
57501+int grsec_enable_socket_server;
57502+int grsec_socket_server_gid;
57503+int grsec_resource_logging;
57504+int grsec_disable_privio;
57505+int grsec_enable_log_rwxmaps;
57506+int grsec_lock;
57507+
57508+DEFINE_SPINLOCK(grsec_alert_lock);
57509+unsigned long grsec_alert_wtime = 0;
57510+unsigned long grsec_alert_fyet = 0;
57511+
57512+DEFINE_SPINLOCK(grsec_audit_lock);
57513+
57514+DEFINE_RWLOCK(grsec_exec_file_lock);
57515+
57516+char *gr_shared_page[4];
57517+
57518+char *gr_alert_log_fmt;
57519+char *gr_audit_log_fmt;
57520+char *gr_alert_log_buf;
57521+char *gr_audit_log_buf;
57522+
57523+extern struct gr_arg *gr_usermode;
57524+extern unsigned char *gr_system_salt;
57525+extern unsigned char *gr_system_sum;
57526+
57527+void __init
57528+grsecurity_init(void)
57529+{
57530+ int j;
57531+ /* create the per-cpu shared pages */
57532+
57533+#ifdef CONFIG_X86
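+	/* wipe the BIOS keyboard buffer (BDA 0x41a-0x43d) so keystrokes typed before boot
+	   do not linger in low memory */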
57534+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57535+#endif
57536+
57537+ for (j = 0; j < 4; j++) {
57538+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57539+ if (gr_shared_page[j] == NULL) {
57540+ panic("Unable to allocate grsecurity shared page");
57541+ return;
57542+ }
57543+ }
57544+
57545+ /* allocate log buffers */
57546+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57547+ if (!gr_alert_log_fmt) {
57548+ panic("Unable to allocate grsecurity alert log format buffer");
57549+ return;
57550+ }
57551+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57552+ if (!gr_audit_log_fmt) {
57553+ panic("Unable to allocate grsecurity audit log format buffer");
57554+ return;
57555+ }
57556+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57557+ if (!gr_alert_log_buf) {
57558+ panic("Unable to allocate grsecurity alert log buffer");
57559+ return;
57560+ }
57561+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57562+ if (!gr_audit_log_buf) {
57563+ panic("Unable to allocate grsecurity audit log buffer");
57564+ return;
57565+ }
57566+
57567+ /* allocate memory for authentication structure */
57568+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57569+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57570+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57571+
57572+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57573+ panic("Unable to allocate grsecurity authentication structure");
57574+ return;
57575+ }
57576+
57577+
57578+#ifdef CONFIG_GRKERNSEC_IO
57579+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57580+ grsec_disable_privio = 1;
57581+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57582+ grsec_disable_privio = 1;
57583+#else
57584+ grsec_disable_privio = 0;
57585+#endif
57586+#endif
57587+
57588+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57589+ /* for backward compatibility, tpe_invert always defaults to on if
57590+ enabled in the kernel
57591+ */
57592+ grsec_enable_tpe_invert = 1;
57593+#endif
57594+
57595+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57596+#ifndef CONFIG_GRKERNSEC_SYSCTL
57597+ grsec_lock = 1;
57598+#endif
57599+
57600+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57601+ grsec_enable_audit_textrel = 1;
57602+#endif
57603+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57604+ grsec_enable_log_rwxmaps = 1;
57605+#endif
57606+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57607+ grsec_enable_group = 1;
57608+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57609+#endif
57610+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57611+ grsec_enable_chdir = 1;
57612+#endif
57613+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57614+ grsec_enable_harden_ptrace = 1;
57615+#endif
57616+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57617+ grsec_enable_mount = 1;
57618+#endif
57619+#ifdef CONFIG_GRKERNSEC_LINK
57620+ grsec_enable_link = 1;
57621+#endif
57622+#ifdef CONFIG_GRKERNSEC_BRUTE
57623+ grsec_enable_brute = 1;
57624+#endif
57625+#ifdef CONFIG_GRKERNSEC_DMESG
57626+ grsec_enable_dmesg = 1;
57627+#endif
57628+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57629+ grsec_enable_blackhole = 1;
57630+ grsec_lastack_retries = 4;
57631+#endif
57632+#ifdef CONFIG_GRKERNSEC_FIFO
57633+ grsec_enable_fifo = 1;
57634+#endif
57635+#ifdef CONFIG_GRKERNSEC_EXECLOG
57636+ grsec_enable_execlog = 1;
57637+#endif
57638+#ifdef CONFIG_GRKERNSEC_SIGNAL
57639+ grsec_enable_signal = 1;
57640+#endif
57641+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57642+ grsec_enable_forkfail = 1;
57643+#endif
57644+#ifdef CONFIG_GRKERNSEC_TIME
57645+ grsec_enable_time = 1;
57646+#endif
57647+#ifdef CONFIG_GRKERNSEC_RESLOG
57648+ grsec_resource_logging = 1;
57649+#endif
57650+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57651+ grsec_enable_chroot_findtask = 1;
57652+#endif
57653+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57654+ grsec_enable_chroot_unix = 1;
57655+#endif
57656+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57657+ grsec_enable_chroot_mount = 1;
57658+#endif
57659+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57660+ grsec_enable_chroot_fchdir = 1;
57661+#endif
57662+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57663+ grsec_enable_chroot_shmat = 1;
57664+#endif
57665+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57666+ grsec_enable_audit_ptrace = 1;
57667+#endif
57668+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57669+ grsec_enable_chroot_double = 1;
57670+#endif
57671+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57672+ grsec_enable_chroot_pivot = 1;
57673+#endif
57674+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57675+ grsec_enable_chroot_chdir = 1;
57676+#endif
57677+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57678+ grsec_enable_chroot_chmod = 1;
57679+#endif
57680+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57681+ grsec_enable_chroot_mknod = 1;
57682+#endif
57683+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57684+ grsec_enable_chroot_nice = 1;
57685+#endif
57686+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57687+ grsec_enable_chroot_execlog = 1;
57688+#endif
57689+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57690+ grsec_enable_chroot_caps = 1;
57691+#endif
57692+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57693+ grsec_enable_chroot_sysctl = 1;
57694+#endif
57695+#ifdef CONFIG_GRKERNSEC_TPE
57696+ grsec_enable_tpe = 1;
57697+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57698+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57699+ grsec_enable_tpe_all = 1;
57700+#endif
57701+#endif
57702+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57703+ grsec_enable_socket_all = 1;
57704+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57705+#endif
57706+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57707+ grsec_enable_socket_client = 1;
57708+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57709+#endif
57710+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57711+ grsec_enable_socket_server = 1;
57712+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57713+#endif
57714+#endif
57715+
57716+ return;
57717+}
57718diff -urNp linux-2.6.32.48/grsecurity/grsec_link.c linux-2.6.32.48/grsecurity/grsec_link.c
57719--- linux-2.6.32.48/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
57720+++ linux-2.6.32.48/grsecurity/grsec_link.c 2011-11-15 19:59:43.000000000 -0500
57721@@ -0,0 +1,43 @@
57722+#include <linux/kernel.h>
57723+#include <linux/sched.h>
57724+#include <linux/fs.h>
57725+#include <linux/file.h>
57726+#include <linux/grinternal.h>
57727+
57728+int
57729+gr_handle_follow_link(const struct inode *parent,
57730+ const struct inode *inode,
57731+ const struct dentry *dentry, const struct vfsmount *mnt)
57732+{
57733+#ifdef CONFIG_GRKERNSEC_LINK
57734+ const struct cred *cred = current_cred();
57735+
57736+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57737+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57738+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57739+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57740+ return -EACCES;
57741+ }
57742+#endif
57743+ return 0;
57744+}
57745+
57746+int
57747+gr_handle_hardlink(const struct dentry *dentry,
57748+ const struct vfsmount *mnt,
57749+ struct inode *inode, const int mode, const char *to)
57750+{
57751+#ifdef CONFIG_GRKERNSEC_LINK
57752+ const struct cred *cred = current_cred();
57753+
57754+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57755+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57756+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57757+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57758+ !capable(CAP_FOWNER) && cred->uid) {
57759+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57760+ return -EPERM;
57761+ }
57762+#endif
57763+ return 0;
57764+}
57765diff -urNp linux-2.6.32.48/grsecurity/grsec_log.c linux-2.6.32.48/grsecurity/grsec_log.c
57766--- linux-2.6.32.48/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
57767+++ linux-2.6.32.48/grsecurity/grsec_log.c 2011-11-15 19:59:43.000000000 -0500
57768@@ -0,0 +1,322 @@
57769+#include <linux/kernel.h>
57770+#include <linux/sched.h>
57771+#include <linux/file.h>
57772+#include <linux/tty.h>
57773+#include <linux/fs.h>
57774+#include <linux/grinternal.h>
57775+
57776+#ifdef CONFIG_TREE_PREEMPT_RCU
57777+#define DISABLE_PREEMPT() preempt_disable()
57778+#define ENABLE_PREEMPT() preempt_enable()
57779+#else
57780+#define DISABLE_PREEMPT()
57781+#define ENABLE_PREEMPT()
57782+#endif
57783+
57784+#define BEGIN_LOCKS(x) \
57785+ DISABLE_PREEMPT(); \
57786+ rcu_read_lock(); \
57787+ read_lock(&tasklist_lock); \
57788+ read_lock(&grsec_exec_file_lock); \
57789+ if (x != GR_DO_AUDIT) \
57790+ spin_lock(&grsec_alert_lock); \
57791+ else \
57792+ spin_lock(&grsec_audit_lock)
57793+
57794+#define END_LOCKS(x) \
57795+ if (x != GR_DO_AUDIT) \
57796+ spin_unlock(&grsec_alert_lock); \
57797+ else \
57798+ spin_unlock(&grsec_audit_lock); \
57799+ read_unlock(&grsec_exec_file_lock); \
57800+ read_unlock(&tasklist_lock); \
57801+ rcu_read_unlock(); \
57802+ ENABLE_PREEMPT(); \
57803+ if (x == GR_DONT_AUDIT) \
57804+ gr_handle_alertkill(current)
57805+
57806+enum {
57807+ FLOODING,
57808+ NO_FLOODING
57809+};
57810+
57811+extern char *gr_alert_log_fmt;
57812+extern char *gr_audit_log_fmt;
57813+extern char *gr_alert_log_buf;
57814+extern char *gr_audit_log_buf;
57815+
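+/* begin a log line: rate-limit non-audit alerts via CONFIG_GRKERNSEC_FLOODTIME and
+   CONFIG_GRKERNSEC_FLOODBURST, then write the "grsec:" prefix (with IP and role
+   context when available) into the matching buffer */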
57816+static int gr_log_start(int audit)
57817+{
57818+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57819+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57820+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57821+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57822+ unsigned long curr_secs = get_seconds();
57823+
57824+ if (audit == GR_DO_AUDIT)
57825+ goto set_fmt;
57826+
57827+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57828+ grsec_alert_wtime = curr_secs;
57829+ grsec_alert_fyet = 0;
57830+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57831+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57832+ grsec_alert_fyet++;
57833+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57834+ grsec_alert_wtime = curr_secs;
57835+ grsec_alert_fyet++;
57836+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57837+ return FLOODING;
57838+ }
57839+ else return FLOODING;
57840+
57841+set_fmt:
57842+#endif
57843+ memset(buf, 0, PAGE_SIZE);
57844+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
57845+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57846+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57847+ } else if (current->signal->curr_ip) {
57848+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57849+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57850+ } else if (gr_acl_is_enabled()) {
57851+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57852+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57853+ } else {
57854+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57855+ strcpy(buf, fmt);
57856+ }
57857+
57858+ return NO_FLOODING;
57859+}
57860+
57861+static void gr_log_middle(int audit, const char *msg, va_list ap)
57862+ __attribute__ ((format (printf, 2, 0)));
57863+
57864+static void gr_log_middle(int audit, const char *msg, va_list ap)
57865+{
57866+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57867+ unsigned int len = strlen(buf);
57868+
57869+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57870+
57871+ return;
57872+}
57873+
57874+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57875+ __attribute__ ((format (printf, 2, 3)));
57876+
57877+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57878+{
57879+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57880+ unsigned int len = strlen(buf);
57881+ va_list ap;
57882+
57883+ va_start(ap, msg);
57884+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57885+ va_end(ap);
57886+
57887+ return;
57888+}
57889+
57890+static void gr_log_end(int audit, int append_default)
57891+{
57892+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57893+
57894+ if (append_default) {
57895+ unsigned int len = strlen(buf);
57896+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57897+ }
57898+
57899+ printk("%s\n", buf);
57900+
57901+ return;
57902+}
57903+
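+/* central variadic logger: argtypes selects how the arguments are pulled off the
+   va_list and formatted into the log buffer */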
57904+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57905+{
57906+ int logtype;
57907+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57908+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57909+ void *voidptr = NULL;
57910+ int num1 = 0, num2 = 0;
57911+ unsigned long ulong1 = 0, ulong2 = 0;
57912+ struct dentry *dentry = NULL;
57913+ struct vfsmount *mnt = NULL;
57914+ struct file *file = NULL;
57915+ struct task_struct *task = NULL;
57916+ const struct cred *cred, *pcred;
57917+ va_list ap;
57918+
57919+ BEGIN_LOCKS(audit);
57920+ logtype = gr_log_start(audit);
57921+ if (logtype == FLOODING) {
57922+ END_LOCKS(audit);
57923+ return;
57924+ }
57925+ va_start(ap, argtypes);
57926+ switch (argtypes) {
57927+ case GR_TTYSNIFF:
57928+ task = va_arg(ap, struct task_struct *);
57929+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57930+ break;
57931+ case GR_SYSCTL_HIDDEN:
57932+ str1 = va_arg(ap, char *);
57933+ gr_log_middle_varargs(audit, msg, result, str1);
57934+ break;
57935+ case GR_RBAC:
57936+ dentry = va_arg(ap, struct dentry *);
57937+ mnt = va_arg(ap, struct vfsmount *);
57938+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57939+ break;
57940+ case GR_RBAC_STR:
57941+ dentry = va_arg(ap, struct dentry *);
57942+ mnt = va_arg(ap, struct vfsmount *);
57943+ str1 = va_arg(ap, char *);
57944+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57945+ break;
57946+ case GR_STR_RBAC:
57947+ str1 = va_arg(ap, char *);
57948+ dentry = va_arg(ap, struct dentry *);
57949+ mnt = va_arg(ap, struct vfsmount *);
57950+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57951+ break;
57952+ case GR_RBAC_MODE2:
57953+ dentry = va_arg(ap, struct dentry *);
57954+ mnt = va_arg(ap, struct vfsmount *);
57955+ str1 = va_arg(ap, char *);
57956+ str2 = va_arg(ap, char *);
57957+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57958+ break;
57959+ case GR_RBAC_MODE3:
57960+ dentry = va_arg(ap, struct dentry *);
57961+ mnt = va_arg(ap, struct vfsmount *);
57962+ str1 = va_arg(ap, char *);
57963+ str2 = va_arg(ap, char *);
57964+ str3 = va_arg(ap, char *);
57965+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57966+ break;
57967+ case GR_FILENAME:
57968+ dentry = va_arg(ap, struct dentry *);
57969+ mnt = va_arg(ap, struct vfsmount *);
57970+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57971+ break;
57972+ case GR_STR_FILENAME:
57973+ str1 = va_arg(ap, char *);
57974+ dentry = va_arg(ap, struct dentry *);
57975+ mnt = va_arg(ap, struct vfsmount *);
57976+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57977+ break;
57978+ case GR_FILENAME_STR:
57979+ dentry = va_arg(ap, struct dentry *);
57980+ mnt = va_arg(ap, struct vfsmount *);
57981+ str1 = va_arg(ap, char *);
57982+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57983+ break;
57984+ case GR_FILENAME_TWO_INT:
57985+ dentry = va_arg(ap, struct dentry *);
57986+ mnt = va_arg(ap, struct vfsmount *);
57987+ num1 = va_arg(ap, int);
57988+ num2 = va_arg(ap, int);
57989+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57990+ break;
57991+ case GR_FILENAME_TWO_INT_STR:
57992+ dentry = va_arg(ap, struct dentry *);
57993+ mnt = va_arg(ap, struct vfsmount *);
57994+ num1 = va_arg(ap, int);
57995+ num2 = va_arg(ap, int);
57996+ str1 = va_arg(ap, char *);
57997+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57998+ break;
57999+ case GR_TEXTREL:
58000+ file = va_arg(ap, struct file *);
58001+ ulong1 = va_arg(ap, unsigned long);
58002+ ulong2 = va_arg(ap, unsigned long);
58003+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58004+ break;
58005+ case GR_PTRACE:
58006+ task = va_arg(ap, struct task_struct *);
58007+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58008+ break;
58009+ case GR_RESOURCE:
58010+ task = va_arg(ap, struct task_struct *);
58011+ cred = __task_cred(task);
58012+ pcred = __task_cred(task->real_parent);
58013+ ulong1 = va_arg(ap, unsigned long);
58014+ str1 = va_arg(ap, char *);
58015+ ulong2 = va_arg(ap, unsigned long);
58016+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58017+ break;
58018+ case GR_CAP:
58019+ task = va_arg(ap, struct task_struct *);
58020+ cred = __task_cred(task);
58021+ pcred = __task_cred(task->real_parent);
58022+ str1 = va_arg(ap, char *);
58023+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58024+ break;
58025+ case GR_SIG:
58026+ str1 = va_arg(ap, char *);
58027+ voidptr = va_arg(ap, void *);
58028+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58029+ break;
58030+ case GR_SIG2:
58031+ task = va_arg(ap, struct task_struct *);
58032+ cred = __task_cred(task);
58033+ pcred = __task_cred(task->real_parent);
58034+ num1 = va_arg(ap, int);
58035+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58036+ break;
58037+ case GR_CRASH1:
58038+ task = va_arg(ap, struct task_struct *);
58039+ cred = __task_cred(task);
58040+ pcred = __task_cred(task->real_parent);
58041+ ulong1 = va_arg(ap, unsigned long);
58042+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58043+ break;
58044+ case GR_CRASH2:
58045+ task = va_arg(ap, struct task_struct *);
58046+ cred = __task_cred(task);
58047+ pcred = __task_cred(task->real_parent);
58048+ ulong1 = va_arg(ap, unsigned long);
58049+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58050+ break;
58051+ case GR_RWXMAP:
58052+ file = va_arg(ap, struct file *);
58053+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58054+ break;
58055+ case GR_PSACCT:
58056+ {
58057+ unsigned int wday, cday;
58058+ __u8 whr, chr;
58059+ __u8 wmin, cmin;
58060+ __u8 wsec, csec;
58061+ char cur_tty[64] = { 0 };
58062+ char parent_tty[64] = { 0 };
58063+
58064+ task = va_arg(ap, struct task_struct *);
58065+ wday = va_arg(ap, unsigned int);
58066+ cday = va_arg(ap, unsigned int);
58067+ whr = va_arg(ap, int);
58068+ chr = va_arg(ap, int);
58069+ wmin = va_arg(ap, int);
58070+ cmin = va_arg(ap, int);
58071+ wsec = va_arg(ap, int);
58072+ csec = va_arg(ap, int);
58073+ ulong1 = va_arg(ap, unsigned long);
58074+ cred = __task_cred(task);
58075+ pcred = __task_cred(task->real_parent);
58076+
58077+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58078+ }
58079+ break;
58080+ default:
58081+ gr_log_middle(audit, msg, ap);
58082+ }
58083+ va_end(ap);
58084+ // these don't need DEFAULTSECARGS printed on the end
58085+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58086+ gr_log_end(audit, 0);
58087+ else
58088+ gr_log_end(audit, 1);
58089+ END_LOCKS(audit);
58090+}
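
Illustrative aside, not part of the patch: the gr_log_varargs() dispatcher above builds each message in a page-sized buffer, prepends a prefix, then switches on an argument-type tag and pulls typed values out of a va_list. The following stand-alone user-space sketch shows the same pattern with hypothetical names (log_varargs, log_append, LOG_STR_INT are illustrative only, and the buffer is a plain array rather than PAGE_SIZE); it is a sketch of the dispatch idea, not kernel code.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define LOG_BUF_SIZE 4096            /* stand-in for the page-sized log buffer */

enum log_argtypes { LOG_DEFAULT, LOG_STR, LOG_STR_INT };

static char log_buf[LOG_BUF_SIZE];

/* Append a formatted message to the shared buffer, as gr_log_middle_varargs() does. */
static void log_append(const char *fmt, ...)
{
	size_t len = strlen(log_buf);
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(log_buf + len, sizeof(log_buf) - len - 1, fmt, ap);
	va_end(ap);
}

/* Dispatch on the argument-type tag, pulling typed values out of the va_list. */
static void log_varargs(const char *msg, enum log_argtypes argtypes, ...)
{
	va_list ap;

	memset(log_buf, 0, sizeof(log_buf));
	snprintf(log_buf, sizeof(log_buf) - 1, "example: ");

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_STR:
		log_append(msg, va_arg(ap, char *));
		break;
	case LOG_STR_INT: {
		char *s = va_arg(ap, char *);
		int n = va_arg(ap, int);
		log_append(msg, s, n);
		break;
	}
	default:
		log_append("%s", msg);
	}
	va_end(ap);

	printf("%s\n", log_buf);
}

int main(void)
{
	log_varargs("denied open of %.64s by pid %d", LOG_STR_INT, "/etc/shadow", 1234);
	return 0;
}

Keeping one tag per call site (GR_RBAC, GR_RESOURCE, and so on in the real code) lets a single varargs entry point serve every log format while the compiler still checks the constant format strings.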
58091diff -urNp linux-2.6.32.48/grsecurity/grsec_mem.c linux-2.6.32.48/grsecurity/grsec_mem.c
58092--- linux-2.6.32.48/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
58093+++ linux-2.6.32.48/grsecurity/grsec_mem.c 2011-11-15 19:59:43.000000000 -0500
58094@@ -0,0 +1,33 @@
58095+#include <linux/kernel.h>
58096+#include <linux/sched.h>
58097+#include <linux/mm.h>
58098+#include <linux/mman.h>
58099+#include <linux/grinternal.h>
58100+
58101+void
58102+gr_handle_ioperm(void)
58103+{
58104+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58105+ return;
58106+}
58107+
58108+void
58109+gr_handle_iopl(void)
58110+{
58111+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58112+ return;
58113+}
58114+
58115+void
58116+gr_handle_mem_readwrite(u64 from, u64 to)
58117+{
58118+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58119+ return;
58120+}
58121+
58122+void
58123+gr_handle_vm86(void)
58124+{
58125+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58126+ return;
58127+}
58128diff -urNp linux-2.6.32.48/grsecurity/grsec_mount.c linux-2.6.32.48/grsecurity/grsec_mount.c
58129--- linux-2.6.32.48/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
58130+++ linux-2.6.32.48/grsecurity/grsec_mount.c 2011-11-15 19:59:43.000000000 -0500
58131@@ -0,0 +1,62 @@
58132+#include <linux/kernel.h>
58133+#include <linux/sched.h>
58134+#include <linux/mount.h>
58135+#include <linux/grsecurity.h>
58136+#include <linux/grinternal.h>
58137+
58138+void
58139+gr_log_remount(const char *devname, const int retval)
58140+{
58141+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58142+ if (grsec_enable_mount && (retval >= 0))
58143+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58144+#endif
58145+ return;
58146+}
58147+
58148+void
58149+gr_log_unmount(const char *devname, const int retval)
58150+{
58151+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58152+ if (grsec_enable_mount && (retval >= 0))
58153+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58154+#endif
58155+ return;
58156+}
58157+
58158+void
58159+gr_log_mount(const char *from, const char *to, const int retval)
58160+{
58161+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58162+ if (grsec_enable_mount && (retval >= 0))
58163+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58164+#endif
58165+ return;
58166+}
58167+
58168+int
58169+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58170+{
58171+#ifdef CONFIG_GRKERNSEC_ROFS
58172+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58173+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58174+ return -EPERM;
58175+ } else
58176+ return 0;
58177+#endif
58178+ return 0;
58179+}
58180+
58181+int
58182+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58183+{
58184+#ifdef CONFIG_GRKERNSEC_ROFS
58185+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58186+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58187+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58188+ return -EPERM;
58189+ } else
58190+ return 0;
58191+#endif
58192+ return 0;
58193+}
58194diff -urNp linux-2.6.32.48/grsecurity/grsec_pax.c linux-2.6.32.48/grsecurity/grsec_pax.c
58195--- linux-2.6.32.48/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
58196+++ linux-2.6.32.48/grsecurity/grsec_pax.c 2011-11-15 19:59:43.000000000 -0500
58197@@ -0,0 +1,36 @@
58198+#include <linux/kernel.h>
58199+#include <linux/sched.h>
58200+#include <linux/mm.h>
58201+#include <linux/file.h>
58202+#include <linux/grinternal.h>
58203+#include <linux/grsecurity.h>
58204+
58205+void
58206+gr_log_textrel(struct vm_area_struct * vma)
58207+{
58208+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58209+ if (grsec_enable_audit_textrel)
58210+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58211+#endif
58212+ return;
58213+}
58214+
58215+void
58216+gr_log_rwxmmap(struct file *file)
58217+{
58218+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58219+ if (grsec_enable_log_rwxmaps)
58220+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58221+#endif
58222+ return;
58223+}
58224+
58225+void
58226+gr_log_rwxmprotect(struct file *file)
58227+{
58228+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58229+ if (grsec_enable_log_rwxmaps)
58230+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58231+#endif
58232+ return;
58233+}
58234diff -urNp linux-2.6.32.48/grsecurity/grsec_ptrace.c linux-2.6.32.48/grsecurity/grsec_ptrace.c
58235--- linux-2.6.32.48/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
58236+++ linux-2.6.32.48/grsecurity/grsec_ptrace.c 2011-11-15 19:59:43.000000000 -0500
58237@@ -0,0 +1,14 @@
58238+#include <linux/kernel.h>
58239+#include <linux/sched.h>
58240+#include <linux/grinternal.h>
58241+#include <linux/grsecurity.h>
58242+
58243+void
58244+gr_audit_ptrace(struct task_struct *task)
58245+{
58246+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58247+ if (grsec_enable_audit_ptrace)
58248+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58249+#endif
58250+ return;
58251+}
58252diff -urNp linux-2.6.32.48/grsecurity/grsec_sig.c linux-2.6.32.48/grsecurity/grsec_sig.c
58253--- linux-2.6.32.48/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
58254+++ linux-2.6.32.48/grsecurity/grsec_sig.c 2011-11-15 19:59:43.000000000 -0500
58255@@ -0,0 +1,205 @@
58256+#include <linux/kernel.h>
58257+#include <linux/sched.h>
58258+#include <linux/delay.h>
58259+#include <linux/grsecurity.h>
58260+#include <linux/grinternal.h>
58261+#include <linux/hardirq.h>
58262+
58263+char *signames[] = {
58264+ [SIGSEGV] = "Segmentation fault",
58265+ [SIGILL] = "Illegal instruction",
58266+ [SIGABRT] = "Abort",
58267+ [SIGBUS] = "Invalid alignment/Bus error"
58268+};
58269+
58270+void
58271+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58272+{
58273+#ifdef CONFIG_GRKERNSEC_SIGNAL
58274+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58275+ (sig == SIGABRT) || (sig == SIGBUS))) {
58276+ if (t->pid == current->pid) {
58277+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58278+ } else {
58279+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58280+ }
58281+ }
58282+#endif
58283+ return;
58284+}
58285+
58286+int
58287+gr_handle_signal(const struct task_struct *p, const int sig)
58288+{
58289+#ifdef CONFIG_GRKERNSEC
58290+ if (current->pid > 1 && gr_check_protected_task(p)) {
58291+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58292+ return -EPERM;
58293+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58294+ return -EPERM;
58295+ }
58296+#endif
58297+ return 0;
58298+}
58299+
58300+#ifdef CONFIG_GRKERNSEC
58301+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58302+
58303+int gr_fake_force_sig(int sig, struct task_struct *t)
58304+{
58305+ unsigned long int flags;
58306+ int ret, blocked, ignored;
58307+ struct k_sigaction *action;
58308+
58309+ spin_lock_irqsave(&t->sighand->siglock, flags);
58310+ action = &t->sighand->action[sig-1];
58311+ ignored = action->sa.sa_handler == SIG_IGN;
58312+ blocked = sigismember(&t->blocked, sig);
58313+ if (blocked || ignored) {
58314+ action->sa.sa_handler = SIG_DFL;
58315+ if (blocked) {
58316+ sigdelset(&t->blocked, sig);
58317+ recalc_sigpending_and_wake(t);
58318+ }
58319+ }
58320+ if (action->sa.sa_handler == SIG_DFL)
58321+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
58322+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58323+
58324+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
58325+
58326+ return ret;
58327+}
58328+#endif
58329+
58330+#ifdef CONFIG_GRKERNSEC_BRUTE
58331+#define GR_USER_BAN_TIME (15 * 60)
58332+
58333+static int __get_dumpable(unsigned long mm_flags)
58334+{
58335+ int ret;
58336+
58337+ ret = mm_flags & MMF_DUMPABLE_MASK;
58338+ return (ret >= 2) ? 2 : ret;
58339+}
58340+#endif
58341+
58342+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58343+{
58344+#ifdef CONFIG_GRKERNSEC_BRUTE
58345+ uid_t uid = 0;
58346+
58347+ if (!grsec_enable_brute)
58348+ return;
58349+
58350+ rcu_read_lock();
58351+ read_lock(&tasklist_lock);
58352+ read_lock(&grsec_exec_file_lock);
58353+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58354+ p->real_parent->brute = 1;
58355+ else {
58356+ const struct cred *cred = __task_cred(p), *cred2;
58357+ struct task_struct *tsk, *tsk2;
58358+
58359+ if (!__get_dumpable(mm_flags) && cred->uid) {
58360+ struct user_struct *user;
58361+
58362+ uid = cred->uid;
58363+
58364+ /* this is put upon execution past expiration */
58365+ user = find_user(uid);
58366+ if (user == NULL)
58367+ goto unlock;
58368+ user->banned = 1;
58369+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58370+ if (user->ban_expires == ~0UL)
58371+ user->ban_expires--;
58372+
58373+ do_each_thread(tsk2, tsk) {
58374+ cred2 = __task_cred(tsk);
58375+ if (tsk != p && cred2->uid == uid)
58376+ gr_fake_force_sig(SIGKILL, tsk);
58377+ } while_each_thread(tsk2, tsk);
58378+ }
58379+ }
58380+unlock:
58381+ read_unlock(&grsec_exec_file_lock);
58382+ read_unlock(&tasklist_lock);
58383+ rcu_read_unlock();
58384+
58385+ if (uid)
58386+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58387+#endif
58388+ return;
58389+}
58390+
58391+void gr_handle_brute_check(void)
58392+{
58393+#ifdef CONFIG_GRKERNSEC_BRUTE
58394+ if (current->brute)
58395+ msleep(30 * 1000);
58396+#endif
58397+ return;
58398+}
58399+
58400+void gr_handle_kernel_exploit(void)
58401+{
58402+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58403+ const struct cred *cred;
58404+ struct task_struct *tsk, *tsk2;
58405+ struct user_struct *user;
58406+ uid_t uid;
58407+
58408+ if (in_irq() || in_serving_softirq() || in_nmi())
58409+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58410+
58411+ uid = current_uid();
58412+
58413+ if (uid == 0)
58414+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58415+ else {
58416+ /* kill all the processes of this user, hold a reference
58417+ to their creds struct, and prevent them from creating
58418+ another process until system reset
58419+ */
58420+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58421+ /* we intentionally leak this ref */
58422+ user = get_uid(current->cred->user);
58423+ if (user) {
58424+ user->banned = 1;
58425+ user->ban_expires = ~0UL;
58426+ }
58427+
58428+ read_lock(&tasklist_lock);
58429+ do_each_thread(tsk2, tsk) {
58430+ cred = __task_cred(tsk);
58431+ if (cred->uid == uid)
58432+ gr_fake_force_sig(SIGKILL, tsk);
58433+ } while_each_thread(tsk2, tsk);
58434+ read_unlock(&tasklist_lock);
58435+ }
58436+#endif
58437+}
58438+
58439+int __gr_process_user_ban(struct user_struct *user)
58440+{
58441+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58442+ if (unlikely(user->banned)) {
58443+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58444+ user->banned = 0;
58445+ user->ban_expires = 0;
58446+ free_uid(user);
58447+ } else
58448+ return -EPERM;
58449+ }
58450+#endif
58451+ return 0;
58452+}
58453+
58454+int gr_process_user_ban(void)
58455+{
58456+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58457+ return __gr_process_user_ban(current->cred->user);
58458+#endif
58459+ return 0;
58460+}
58461diff -urNp linux-2.6.32.48/grsecurity/grsec_sock.c linux-2.6.32.48/grsecurity/grsec_sock.c
58462--- linux-2.6.32.48/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
58463+++ linux-2.6.32.48/grsecurity/grsec_sock.c 2011-11-15 19:59:43.000000000 -0500
58464@@ -0,0 +1,275 @@
58465+#include <linux/kernel.h>
58466+#include <linux/module.h>
58467+#include <linux/sched.h>
58468+#include <linux/file.h>
58469+#include <linux/net.h>
58470+#include <linux/in.h>
58471+#include <linux/ip.h>
58472+#include <net/sock.h>
58473+#include <net/inet_sock.h>
58474+#include <linux/grsecurity.h>
58475+#include <linux/grinternal.h>
58476+#include <linux/gracl.h>
58477+
58478+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
58479+EXPORT_SYMBOL(gr_cap_rtnetlink);
58480+
58481+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58482+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58483+
58484+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58485+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58486+
58487+#ifdef CONFIG_UNIX_MODULE
58488+EXPORT_SYMBOL(gr_acl_handle_unix);
58489+EXPORT_SYMBOL(gr_acl_handle_mknod);
58490+EXPORT_SYMBOL(gr_handle_chroot_unix);
58491+EXPORT_SYMBOL(gr_handle_create);
58492+#endif
58493+
58494+#ifdef CONFIG_GRKERNSEC
58495+#define gr_conn_table_size 32749
58496+struct conn_table_entry {
58497+ struct conn_table_entry *next;
58498+ struct signal_struct *sig;
58499+};
58500+
58501+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58502+DEFINE_SPINLOCK(gr_conn_table_lock);
58503+
58504+extern const char * gr_socktype_to_name(unsigned char type);
58505+extern const char * gr_proto_to_name(unsigned char proto);
58506+extern const char * gr_sockfamily_to_name(unsigned char family);
58507+
58508+static __inline__ int
58509+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58510+{
58511+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58512+}
58513+
58514+static __inline__ int
58515+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58516+ __u16 sport, __u16 dport)
58517+{
58518+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58519+ sig->gr_sport == sport && sig->gr_dport == dport))
58520+ return 1;
58521+ else
58522+ return 0;
58523+}
58524+
58525+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58526+{
58527+ struct conn_table_entry **match;
58528+ unsigned int index;
58529+
58530+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58531+ sig->gr_sport, sig->gr_dport,
58532+ gr_conn_table_size);
58533+
58534+ newent->sig = sig;
58535+
58536+ match = &gr_conn_table[index];
58537+ newent->next = *match;
58538+ *match = newent;
58539+
58540+ return;
58541+}
58542+
58543+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58544+{
58545+ struct conn_table_entry *match, *last = NULL;
58546+ unsigned int index;
58547+
58548+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58549+ sig->gr_sport, sig->gr_dport,
58550+ gr_conn_table_size);
58551+
58552+ match = gr_conn_table[index];
58553+ while (match && !conn_match(match->sig,
58554+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58555+ sig->gr_dport)) {
58556+ last = match;
58557+ match = match->next;
58558+ }
58559+
58560+ if (match) {
58561+ if (last)
58562+ last->next = match->next;
58563+ else
58564+ gr_conn_table[index] = NULL;
58565+ kfree(match);
58566+ }
58567+
58568+ return;
58569+}
58570+
58571+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58572+ __u16 sport, __u16 dport)
58573+{
58574+ struct conn_table_entry *match;
58575+ unsigned int index;
58576+
58577+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58578+
58579+ match = gr_conn_table[index];
58580+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58581+ match = match->next;
58582+
58583+ if (match)
58584+ return match->sig;
58585+ else
58586+ return NULL;
58587+}
58588+
58589+#endif
58590+
58591+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58592+{
58593+#ifdef CONFIG_GRKERNSEC
58594+ struct signal_struct *sig = task->signal;
58595+ struct conn_table_entry *newent;
58596+
58597+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58598+ if (newent == NULL)
58599+ return;
58600+ /* no bh lock needed since we are called with bh disabled */
58601+ spin_lock(&gr_conn_table_lock);
58602+ gr_del_task_from_ip_table_nolock(sig);
58603+ sig->gr_saddr = inet->rcv_saddr;
58604+ sig->gr_daddr = inet->daddr;
58605+ sig->gr_sport = inet->sport;
58606+ sig->gr_dport = inet->dport;
58607+ gr_add_to_task_ip_table_nolock(sig, newent);
58608+ spin_unlock(&gr_conn_table_lock);
58609+#endif
58610+ return;
58611+}
58612+
58613+void gr_del_task_from_ip_table(struct task_struct *task)
58614+{
58615+#ifdef CONFIG_GRKERNSEC
58616+ spin_lock_bh(&gr_conn_table_lock);
58617+ gr_del_task_from_ip_table_nolock(task->signal);
58618+ spin_unlock_bh(&gr_conn_table_lock);
58619+#endif
58620+ return;
58621+}
58622+
58623+void
58624+gr_attach_curr_ip(const struct sock *sk)
58625+{
58626+#ifdef CONFIG_GRKERNSEC
58627+ struct signal_struct *p, *set;
58628+ const struct inet_sock *inet = inet_sk(sk);
58629+
58630+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58631+ return;
58632+
58633+ set = current->signal;
58634+
58635+ spin_lock_bh(&gr_conn_table_lock);
58636+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
58637+ inet->dport, inet->sport);
58638+ if (unlikely(p != NULL)) {
58639+ set->curr_ip = p->curr_ip;
58640+ set->used_accept = 1;
58641+ gr_del_task_from_ip_table_nolock(p);
58642+ spin_unlock_bh(&gr_conn_table_lock);
58643+ return;
58644+ }
58645+ spin_unlock_bh(&gr_conn_table_lock);
58646+
58647+ set->curr_ip = inet->daddr;
58648+ set->used_accept = 1;
58649+#endif
58650+ return;
58651+}
58652+
58653+int
58654+gr_handle_sock_all(const int family, const int type, const int protocol)
58655+{
58656+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58657+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58658+ (family != AF_UNIX)) {
58659+ if (family == AF_INET)
58660+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58661+ else
58662+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58663+ return -EACCES;
58664+ }
58665+#endif
58666+ return 0;
58667+}
58668+
58669+int
58670+gr_handle_sock_server(const struct sockaddr *sck)
58671+{
58672+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58673+ if (grsec_enable_socket_server &&
58674+ in_group_p(grsec_socket_server_gid) &&
58675+ sck && (sck->sa_family != AF_UNIX) &&
58676+ (sck->sa_family != AF_LOCAL)) {
58677+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58678+ return -EACCES;
58679+ }
58680+#endif
58681+ return 0;
58682+}
58683+
58684+int
58685+gr_handle_sock_server_other(const struct sock *sck)
58686+{
58687+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58688+ if (grsec_enable_socket_server &&
58689+ in_group_p(grsec_socket_server_gid) &&
58690+ sck && (sck->sk_family != AF_UNIX) &&
58691+ (sck->sk_family != AF_LOCAL)) {
58692+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58693+ return -EACCES;
58694+ }
58695+#endif
58696+ return 0;
58697+}
58698+
58699+int
58700+gr_handle_sock_client(const struct sockaddr *sck)
58701+{
58702+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58703+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58704+ sck && (sck->sa_family != AF_UNIX) &&
58705+ (sck->sa_family != AF_LOCAL)) {
58706+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58707+ return -EACCES;
58708+ }
58709+#endif
58710+ return 0;
58711+}
58712+
58713+kernel_cap_t
58714+gr_cap_rtnetlink(struct sock *sock)
58715+{
58716+#ifdef CONFIG_GRKERNSEC
58717+ if (!gr_acl_is_enabled())
58718+ return current_cap();
58719+ else if (sock->sk_protocol == NETLINK_ISCSI &&
58720+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
58721+ gr_is_capable(CAP_SYS_ADMIN))
58722+ return current_cap();
58723+ else if (sock->sk_protocol == NETLINK_AUDIT &&
58724+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
58725+ gr_is_capable(CAP_AUDIT_WRITE) &&
58726+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
58727+ gr_is_capable(CAP_AUDIT_CONTROL))
58728+ return current_cap();
58729+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
58730+ ((sock->sk_protocol == NETLINK_ROUTE) ?
58731+ gr_is_capable_nolog(CAP_NET_ADMIN) :
58732+ gr_is_capable(CAP_NET_ADMIN)))
58733+ return current_cap();
58734+ else
58735+ return __cap_empty_set;
58736+#else
58737+ return current_cap();
58738+#endif
58739+}
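
Illustrative aside, not part of the patch: gr_conn_table in grsec_sock.c above is a fixed-size array of singly linked chains indexed by a hash of the connection 4-tuple (saddr, daddr, sport, dport). The stand-alone user-space sketch below shows the same hash-and-chain scheme with hypothetical names and no locking; the real code additionally protects the table with gr_conn_table_lock and stores a signal_struct pointer per entry.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define TABLE_SIZE 32749             /* prime bucket count, as in gr_conn_table_size */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn *next;               /* chains collisions within one bucket */
};

static struct conn *table[TABLE_SIZE];

/* Same shape as conn_hash(): fold the 4-tuple into one word, reduce mod the table size. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_insert(struct conn *c)
{
	unsigned int i = conn_hash(c->saddr, c->daddr, c->sport, c->dport);

	c->next = table[i];              /* push onto the head of the chain */
	table[i] = c;
}

static struct conn *conn_lookup(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
	struct conn *c = table[conn_hash(saddr, daddr, sport, dport)];

	while (c && !(c->saddr == saddr && c->daddr == daddr &&
		      c->sport == sport && c->dport == dport))
		c = c->next;             /* walk the chain until the exact 4-tuple matches */
	return c;
}

int main(void)
{
	struct conn c = { .saddr = 0x0a000001, .daddr = 0x0a000002, .sport = 40000, .dport = 22 };

	conn_insert(&c);
	printf("found: %d\n", conn_lookup(0x0a000001, 0x0a000002, 40000, 22) != NULL);
	return 0;
}

A prime bucket count keeps the modulo reduction from clustering entries when ports and addresses share low-order patterns, which is why the patch uses 32749 rather than a power of two.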
58740diff -urNp linux-2.6.32.48/grsecurity/grsec_sysctl.c linux-2.6.32.48/grsecurity/grsec_sysctl.c
58741--- linux-2.6.32.48/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
58742+++ linux-2.6.32.48/grsecurity/grsec_sysctl.c 2011-11-15 19:59:43.000000000 -0500
58743@@ -0,0 +1,479 @@
58744+#include <linux/kernel.h>
58745+#include <linux/sched.h>
58746+#include <linux/sysctl.h>
58747+#include <linux/grsecurity.h>
58748+#include <linux/grinternal.h>
58749+
58750+int
58751+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58752+{
58753+#ifdef CONFIG_GRKERNSEC_SYSCTL
58754+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58755+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58756+ return -EACCES;
58757+ }
58758+#endif
58759+ return 0;
58760+}
58761+
58762+#ifdef CONFIG_GRKERNSEC_ROFS
58763+static int __maybe_unused one = 1;
58764+#endif
58765+
58766+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58767+ctl_table grsecurity_table[] = {
58768+#ifdef CONFIG_GRKERNSEC_SYSCTL
58769+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58770+#ifdef CONFIG_GRKERNSEC_IO
58771+ {
58772+ .ctl_name = CTL_UNNUMBERED,
58773+ .procname = "disable_priv_io",
58774+ .data = &grsec_disable_privio,
58775+ .maxlen = sizeof(int),
58776+ .mode = 0600,
58777+ .proc_handler = &proc_dointvec,
58778+ },
58779+#endif
58780+#endif
58781+#ifdef CONFIG_GRKERNSEC_LINK
58782+ {
58783+ .ctl_name = CTL_UNNUMBERED,
58784+ .procname = "linking_restrictions",
58785+ .data = &grsec_enable_link,
58786+ .maxlen = sizeof(int),
58787+ .mode = 0600,
58788+ .proc_handler = &proc_dointvec,
58789+ },
58790+#endif
58791+#ifdef CONFIG_GRKERNSEC_BRUTE
58792+ {
58793+ .ctl_name = CTL_UNNUMBERED,
58794+ .procname = "deter_bruteforce",
58795+ .data = &grsec_enable_brute,
58796+ .maxlen = sizeof(int),
58797+ .mode = 0600,
58798+ .proc_handler = &proc_dointvec,
58799+ },
58800+#endif
58801+#ifdef CONFIG_GRKERNSEC_FIFO
58802+ {
58803+ .ctl_name = CTL_UNNUMBERED,
58804+ .procname = "fifo_restrictions",
58805+ .data = &grsec_enable_fifo,
58806+ .maxlen = sizeof(int),
58807+ .mode = 0600,
58808+ .proc_handler = &proc_dointvec,
58809+ },
58810+#endif
58811+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58812+ {
58813+ .ctl_name = CTL_UNNUMBERED,
58814+ .procname = "ip_blackhole",
58815+ .data = &grsec_enable_blackhole,
58816+ .maxlen = sizeof(int),
58817+ .mode = 0600,
58818+ .proc_handler = &proc_dointvec,
58819+ },
58820+ {
58821+ .ctl_name = CTL_UNNUMBERED,
58822+ .procname = "lastack_retries",
58823+ .data = &grsec_lastack_retries,
58824+ .maxlen = sizeof(int),
58825+ .mode = 0600,
58826+ .proc_handler = &proc_dointvec,
58827+ },
58828+#endif
58829+#ifdef CONFIG_GRKERNSEC_EXECLOG
58830+ {
58831+ .ctl_name = CTL_UNNUMBERED,
58832+ .procname = "exec_logging",
58833+ .data = &grsec_enable_execlog,
58834+ .maxlen = sizeof(int),
58835+ .mode = 0600,
58836+ .proc_handler = &proc_dointvec,
58837+ },
58838+#endif
58839+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58840+ {
58841+ .ctl_name = CTL_UNNUMBERED,
58842+ .procname = "rwxmap_logging",
58843+ .data = &grsec_enable_log_rwxmaps,
58844+ .maxlen = sizeof(int),
58845+ .mode = 0600,
58846+ .proc_handler = &proc_dointvec,
58847+ },
58848+#endif
58849+#ifdef CONFIG_GRKERNSEC_SIGNAL
58850+ {
58851+ .ctl_name = CTL_UNNUMBERED,
58852+ .procname = "signal_logging",
58853+ .data = &grsec_enable_signal,
58854+ .maxlen = sizeof(int),
58855+ .mode = 0600,
58856+ .proc_handler = &proc_dointvec,
58857+ },
58858+#endif
58859+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58860+ {
58861+ .ctl_name = CTL_UNNUMBERED,
58862+ .procname = "forkfail_logging",
58863+ .data = &grsec_enable_forkfail,
58864+ .maxlen = sizeof(int),
58865+ .mode = 0600,
58866+ .proc_handler = &proc_dointvec,
58867+ },
58868+#endif
58869+#ifdef CONFIG_GRKERNSEC_TIME
58870+ {
58871+ .ctl_name = CTL_UNNUMBERED,
58872+ .procname = "timechange_logging",
58873+ .data = &grsec_enable_time,
58874+ .maxlen = sizeof(int),
58875+ .mode = 0600,
58876+ .proc_handler = &proc_dointvec,
58877+ },
58878+#endif
58879+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58880+ {
58881+ .ctl_name = CTL_UNNUMBERED,
58882+ .procname = "chroot_deny_shmat",
58883+ .data = &grsec_enable_chroot_shmat,
58884+ .maxlen = sizeof(int),
58885+ .mode = 0600,
58886+ .proc_handler = &proc_dointvec,
58887+ },
58888+#endif
58889+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58890+ {
58891+ .ctl_name = CTL_UNNUMBERED,
58892+ .procname = "chroot_deny_unix",
58893+ .data = &grsec_enable_chroot_unix,
58894+ .maxlen = sizeof(int),
58895+ .mode = 0600,
58896+ .proc_handler = &proc_dointvec,
58897+ },
58898+#endif
58899+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58900+ {
58901+ .ctl_name = CTL_UNNUMBERED,
58902+ .procname = "chroot_deny_mount",
58903+ .data = &grsec_enable_chroot_mount,
58904+ .maxlen = sizeof(int),
58905+ .mode = 0600,
58906+ .proc_handler = &proc_dointvec,
58907+ },
58908+#endif
58909+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58910+ {
58911+ .ctl_name = CTL_UNNUMBERED,
58912+ .procname = "chroot_deny_fchdir",
58913+ .data = &grsec_enable_chroot_fchdir,
58914+ .maxlen = sizeof(int),
58915+ .mode = 0600,
58916+ .proc_handler = &proc_dointvec,
58917+ },
58918+#endif
58919+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58920+ {
58921+ .ctl_name = CTL_UNNUMBERED,
58922+ .procname = "chroot_deny_chroot",
58923+ .data = &grsec_enable_chroot_double,
58924+ .maxlen = sizeof(int),
58925+ .mode = 0600,
58926+ .proc_handler = &proc_dointvec,
58927+ },
58928+#endif
58929+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58930+ {
58931+ .ctl_name = CTL_UNNUMBERED,
58932+ .procname = "chroot_deny_pivot",
58933+ .data = &grsec_enable_chroot_pivot,
58934+ .maxlen = sizeof(int),
58935+ .mode = 0600,
58936+ .proc_handler = &proc_dointvec,
58937+ },
58938+#endif
58939+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58940+ {
58941+ .ctl_name = CTL_UNNUMBERED,
58942+ .procname = "chroot_enforce_chdir",
58943+ .data = &grsec_enable_chroot_chdir,
58944+ .maxlen = sizeof(int),
58945+ .mode = 0600,
58946+ .proc_handler = &proc_dointvec,
58947+ },
58948+#endif
58949+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58950+ {
58951+ .ctl_name = CTL_UNNUMBERED,
58952+ .procname = "chroot_deny_chmod",
58953+ .data = &grsec_enable_chroot_chmod,
58954+ .maxlen = sizeof(int),
58955+ .mode = 0600,
58956+ .proc_handler = &proc_dointvec,
58957+ },
58958+#endif
58959+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58960+ {
58961+ .ctl_name = CTL_UNNUMBERED,
58962+ .procname = "chroot_deny_mknod",
58963+ .data = &grsec_enable_chroot_mknod,
58964+ .maxlen = sizeof(int),
58965+ .mode = 0600,
58966+ .proc_handler = &proc_dointvec,
58967+ },
58968+#endif
58969+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58970+ {
58971+ .ctl_name = CTL_UNNUMBERED,
58972+ .procname = "chroot_restrict_nice",
58973+ .data = &grsec_enable_chroot_nice,
58974+ .maxlen = sizeof(int),
58975+ .mode = 0600,
58976+ .proc_handler = &proc_dointvec,
58977+ },
58978+#endif
58979+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58980+ {
58981+ .ctl_name = CTL_UNNUMBERED,
58982+ .procname = "chroot_execlog",
58983+ .data = &grsec_enable_chroot_execlog,
58984+ .maxlen = sizeof(int),
58985+ .mode = 0600,
58986+ .proc_handler = &proc_dointvec,
58987+ },
58988+#endif
58989+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58990+ {
58991+ .ctl_name = CTL_UNNUMBERED,
58992+ .procname = "chroot_caps",
58993+ .data = &grsec_enable_chroot_caps,
58994+ .maxlen = sizeof(int),
58995+ .mode = 0600,
58996+ .proc_handler = &proc_dointvec,
58997+ },
58998+#endif
58999+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59000+ {
59001+ .ctl_name = CTL_UNNUMBERED,
59002+ .procname = "chroot_deny_sysctl",
59003+ .data = &grsec_enable_chroot_sysctl,
59004+ .maxlen = sizeof(int),
59005+ .mode = 0600,
59006+ .proc_handler = &proc_dointvec,
59007+ },
59008+#endif
59009+#ifdef CONFIG_GRKERNSEC_TPE
59010+ {
59011+ .ctl_name = CTL_UNNUMBERED,
59012+ .procname = "tpe",
59013+ .data = &grsec_enable_tpe,
59014+ .maxlen = sizeof(int),
59015+ .mode = 0600,
59016+ .proc_handler = &proc_dointvec,
59017+ },
59018+ {
59019+ .ctl_name = CTL_UNNUMBERED,
59020+ .procname = "tpe_gid",
59021+ .data = &grsec_tpe_gid,
59022+ .maxlen = sizeof(int),
59023+ .mode = 0600,
59024+ .proc_handler = &proc_dointvec,
59025+ },
59026+#endif
59027+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59028+ {
59029+ .ctl_name = CTL_UNNUMBERED,
59030+ .procname = "tpe_invert",
59031+ .data = &grsec_enable_tpe_invert,
59032+ .maxlen = sizeof(int),
59033+ .mode = 0600,
59034+ .proc_handler = &proc_dointvec,
59035+ },
59036+#endif
59037+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59038+ {
59039+ .ctl_name = CTL_UNNUMBERED,
59040+ .procname = "tpe_restrict_all",
59041+ .data = &grsec_enable_tpe_all,
59042+ .maxlen = sizeof(int),
59043+ .mode = 0600,
59044+ .proc_handler = &proc_dointvec,
59045+ },
59046+#endif
59047+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59048+ {
59049+ .ctl_name = CTL_UNNUMBERED,
59050+ .procname = "socket_all",
59051+ .data = &grsec_enable_socket_all,
59052+ .maxlen = sizeof(int),
59053+ .mode = 0600,
59054+ .proc_handler = &proc_dointvec,
59055+ },
59056+ {
59057+ .ctl_name = CTL_UNNUMBERED,
59058+ .procname = "socket_all_gid",
59059+ .data = &grsec_socket_all_gid,
59060+ .maxlen = sizeof(int),
59061+ .mode = 0600,
59062+ .proc_handler = &proc_dointvec,
59063+ },
59064+#endif
59065+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59066+ {
59067+ .ctl_name = CTL_UNNUMBERED,
59068+ .procname = "socket_client",
59069+ .data = &grsec_enable_socket_client,
59070+ .maxlen = sizeof(int),
59071+ .mode = 0600,
59072+ .proc_handler = &proc_dointvec,
59073+ },
59074+ {
59075+ .ctl_name = CTL_UNNUMBERED,
59076+ .procname = "socket_client_gid",
59077+ .data = &grsec_socket_client_gid,
59078+ .maxlen = sizeof(int),
59079+ .mode = 0600,
59080+ .proc_handler = &proc_dointvec,
59081+ },
59082+#endif
59083+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59084+ {
59085+ .ctl_name = CTL_UNNUMBERED,
59086+ .procname = "socket_server",
59087+ .data = &grsec_enable_socket_server,
59088+ .maxlen = sizeof(int),
59089+ .mode = 0600,
59090+ .proc_handler = &proc_dointvec,
59091+ },
59092+ {
59093+ .ctl_name = CTL_UNNUMBERED,
59094+ .procname = "socket_server_gid",
59095+ .data = &grsec_socket_server_gid,
59096+ .maxlen = sizeof(int),
59097+ .mode = 0600,
59098+ .proc_handler = &proc_dointvec,
59099+ },
59100+#endif
59101+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59102+ {
59103+ .ctl_name = CTL_UNNUMBERED,
59104+ .procname = "audit_group",
59105+ .data = &grsec_enable_group,
59106+ .maxlen = sizeof(int),
59107+ .mode = 0600,
59108+ .proc_handler = &proc_dointvec,
59109+ },
59110+ {
59111+ .ctl_name = CTL_UNNUMBERED,
59112+ .procname = "audit_gid",
59113+ .data = &grsec_audit_gid,
59114+ .maxlen = sizeof(int),
59115+ .mode = 0600,
59116+ .proc_handler = &proc_dointvec,
59117+ },
59118+#endif
59119+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59120+ {
59121+ .ctl_name = CTL_UNNUMBERED,
59122+ .procname = "audit_chdir",
59123+ .data = &grsec_enable_chdir,
59124+ .maxlen = sizeof(int),
59125+ .mode = 0600,
59126+ .proc_handler = &proc_dointvec,
59127+ },
59128+#endif
59129+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59130+ {
59131+ .ctl_name = CTL_UNNUMBERED,
59132+ .procname = "audit_mount",
59133+ .data = &grsec_enable_mount,
59134+ .maxlen = sizeof(int),
59135+ .mode = 0600,
59136+ .proc_handler = &proc_dointvec,
59137+ },
59138+#endif
59139+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59140+ {
59141+ .ctl_name = CTL_UNNUMBERED,
59142+ .procname = "audit_textrel",
59143+ .data = &grsec_enable_audit_textrel,
59144+ .maxlen = sizeof(int),
59145+ .mode = 0600,
59146+ .proc_handler = &proc_dointvec,
59147+ },
59148+#endif
59149+#ifdef CONFIG_GRKERNSEC_DMESG
59150+ {
59151+ .ctl_name = CTL_UNNUMBERED,
59152+ .procname = "dmesg",
59153+ .data = &grsec_enable_dmesg,
59154+ .maxlen = sizeof(int),
59155+ .mode = 0600,
59156+ .proc_handler = &proc_dointvec,
59157+ },
59158+#endif
59159+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59160+ {
59161+ .ctl_name = CTL_UNNUMBERED,
59162+ .procname = "chroot_findtask",
59163+ .data = &grsec_enable_chroot_findtask,
59164+ .maxlen = sizeof(int),
59165+ .mode = 0600,
59166+ .proc_handler = &proc_dointvec,
59167+ },
59168+#endif
59169+#ifdef CONFIG_GRKERNSEC_RESLOG
59170+ {
59171+ .ctl_name = CTL_UNNUMBERED,
59172+ .procname = "resource_logging",
59173+ .data = &grsec_resource_logging,
59174+ .maxlen = sizeof(int),
59175+ .mode = 0600,
59176+ .proc_handler = &proc_dointvec,
59177+ },
59178+#endif
59179+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59180+ {
59181+ .ctl_name = CTL_UNNUMBERED,
59182+ .procname = "audit_ptrace",
59183+ .data = &grsec_enable_audit_ptrace,
59184+ .maxlen = sizeof(int),
59185+ .mode = 0600,
59186+ .proc_handler = &proc_dointvec,
59187+ },
59188+#endif
59189+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59190+ {
59191+ .ctl_name = CTL_UNNUMBERED,
59192+ .procname = "harden_ptrace",
59193+ .data = &grsec_enable_harden_ptrace,
59194+ .maxlen = sizeof(int),
59195+ .mode = 0600,
59196+ .proc_handler = &proc_dointvec,
59197+ },
59198+#endif
59199+ {
59200+ .ctl_name = CTL_UNNUMBERED,
59201+ .procname = "grsec_lock",
59202+ .data = &grsec_lock,
59203+ .maxlen = sizeof(int),
59204+ .mode = 0600,
59205+ .proc_handler = &proc_dointvec,
59206+ },
59207+#endif
59208+#ifdef CONFIG_GRKERNSEC_ROFS
59209+ {
59210+ .ctl_name = CTL_UNNUMBERED,
59211+ .procname = "romount_protect",
59212+ .data = &grsec_enable_rofs,
59213+ .maxlen = sizeof(int),
59214+ .mode = 0600,
59215+ .proc_handler = &proc_dointvec_minmax,
59216+ .extra1 = &one,
59217+ .extra2 = &one,
59218+ },
59219+#endif
59220+ { .ctl_name = 0 }
59221+};
59222+#endif
59223diff -urNp linux-2.6.32.48/grsecurity/grsec_time.c linux-2.6.32.48/grsecurity/grsec_time.c
59224--- linux-2.6.32.48/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
59225+++ linux-2.6.32.48/grsecurity/grsec_time.c 2011-11-15 19:59:43.000000000 -0500
59226@@ -0,0 +1,16 @@
59227+#include <linux/kernel.h>
59228+#include <linux/sched.h>
59229+#include <linux/grinternal.h>
59230+#include <linux/module.h>
59231+
59232+void
59233+gr_log_timechange(void)
59234+{
59235+#ifdef CONFIG_GRKERNSEC_TIME
59236+ if (grsec_enable_time)
59237+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59238+#endif
59239+ return;
59240+}
59241+
59242+EXPORT_SYMBOL(gr_log_timechange);
59243diff -urNp linux-2.6.32.48/grsecurity/grsec_tpe.c linux-2.6.32.48/grsecurity/grsec_tpe.c
59244--- linux-2.6.32.48/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
59245+++ linux-2.6.32.48/grsecurity/grsec_tpe.c 2011-11-15 19:59:43.000000000 -0500
59246@@ -0,0 +1,39 @@
59247+#include <linux/kernel.h>
59248+#include <linux/sched.h>
59249+#include <linux/file.h>
59250+#include <linux/fs.h>
59251+#include <linux/grinternal.h>
59252+
59253+extern int gr_acl_tpe_check(void);
59254+
59255+int
59256+gr_tpe_allow(const struct file *file)
59257+{
59258+#ifdef CONFIG_GRKERNSEC
59259+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59260+ const struct cred *cred = current_cred();
59261+
59262+ if (cred->uid && ((grsec_enable_tpe &&
59263+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59264+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
59265+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
59266+#else
59267+ in_group_p(grsec_tpe_gid)
59268+#endif
59269+ ) || gr_acl_tpe_check()) &&
59270+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
59271+ (inode->i_mode & S_IWOTH))))) {
59272+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59273+ return 0;
59274+ }
59275+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59276+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
59277+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
59278+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
59279+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59280+ return 0;
59281+ }
59282+#endif
59283+#endif
59284+ return 1;
59285+}
59286diff -urNp linux-2.6.32.48/grsecurity/grsum.c linux-2.6.32.48/grsecurity/grsum.c
59287--- linux-2.6.32.48/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
59288+++ linux-2.6.32.48/grsecurity/grsum.c 2011-11-15 19:59:43.000000000 -0500
59289@@ -0,0 +1,61 @@
59290+#include <linux/err.h>
59291+#include <linux/kernel.h>
59292+#include <linux/sched.h>
59293+#include <linux/mm.h>
59294+#include <linux/scatterlist.h>
59295+#include <linux/crypto.h>
59296+#include <linux/gracl.h>
59297+
59298+
59299+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59300+#error "crypto and sha256 must be built into the kernel"
59301+#endif
59302+
59303+int
59304+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59305+{
59306+ char *p;
59307+ struct crypto_hash *tfm;
59308+ struct hash_desc desc;
59309+ struct scatterlist sg;
59310+ unsigned char temp_sum[GR_SHA_LEN];
59311+ volatile int retval = 0;
59312+ volatile int dummy = 0;
59313+ unsigned int i;
59314+
59315+ sg_init_table(&sg, 1);
59316+
59317+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59318+ if (IS_ERR(tfm)) {
59319+ /* should never happen, since sha256 should be built in */
59320+ return 1;
59321+ }
59322+
59323+ desc.tfm = tfm;
59324+ desc.flags = 0;
59325+
59326+ crypto_hash_init(&desc);
59327+
59328+ p = salt;
59329+ sg_set_buf(&sg, p, GR_SALT_LEN);
59330+ crypto_hash_update(&desc, &sg, sg.length);
59331+
59332+ p = entry->pw;
59333+ sg_set_buf(&sg, p, strlen(p));
59334+
59335+ crypto_hash_update(&desc, &sg, sg.length);
59336+
59337+ crypto_hash_final(&desc, temp_sum);
59338+
59339+ memset(entry->pw, 0, GR_PW_LEN);
59340+
59341+ for (i = 0; i < GR_SHA_LEN; i++)
59342+ if (sum[i] != temp_sum[i])
59343+ retval = 1;
59344+ else
59345+ dummy = 1; // waste a cycle
59346+
59347+ crypto_free_hash(tfm);
59348+
59349+ return retval;
59350+}
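
Illustrative aside, not part of the patch: chkpw() above compares the computed SHA-256 digest byte by byte without breaking out of the loop early (the "waste a cycle" branch), so a mismatch in the first byte costs the same as a mismatch in the last and the comparison leaks less timing information. The user-space sketch below shows the more common accumulator form of the same idea; ct_compare is a hypothetical name, not a function from the patch.

#include <stdio.h>
#include <stddef.h>

/*
 * Compare two digests without an early exit: OR the XOR of every byte pair
 * into an accumulator, so the loop always runs to the end regardless of
 * where (or whether) the buffers differ.
 */
static int ct_compare(const unsigned char *a, const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;                /* 0 on match, nonzero on mismatch */
}

int main(void)
{
	unsigned char stored[4]   = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char computed[4] = { 0xde, 0xad, 0xbe, 0xef };

	printf("mismatch: %d\n", ct_compare(stored, computed, sizeof(stored)));
	return 0;
}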
59351diff -urNp linux-2.6.32.48/grsecurity/Kconfig linux-2.6.32.48/grsecurity/Kconfig
59352--- linux-2.6.32.48/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
59353+++ linux-2.6.32.48/grsecurity/Kconfig 2011-11-15 19:59:43.000000000 -0500
59354@@ -0,0 +1,1036 @@
59355+#
59356+# grsecurity configuration
59357+#
59358+
59359+menu "Grsecurity"
59360+
59361+config GRKERNSEC
59362+ bool "Grsecurity"
59363+ select CRYPTO
59364+ select CRYPTO_SHA256
59365+ help
59366+ If you say Y here, you will be able to configure many features
59367+ that will enhance the security of your system. It is highly
59368+ recommended that you say Y here and read through the help
59369+ for each option so that you fully understand the features and
59370+ can evaluate their usefulness for your machine.
59371+
59372+choice
59373+ prompt "Security Level"
59374+ depends on GRKERNSEC
59375+ default GRKERNSEC_CUSTOM
59376+
59377+config GRKERNSEC_LOW
59378+ bool "Low"
59379+ select GRKERNSEC_LINK
59380+ select GRKERNSEC_FIFO
59381+ select GRKERNSEC_RANDNET
59382+ select GRKERNSEC_DMESG
59383+ select GRKERNSEC_CHROOT
59384+ select GRKERNSEC_CHROOT_CHDIR
59385+
59386+ help
59387+ If you choose this option, several of the grsecurity options will
59388+ be enabled that will give you greater protection against a number
59389+ of attacks, while assuring that none of your software will have any
59390+ conflicts with the additional security measures. If you run a lot
59391+ of unusual software, or you are having problems with the higher
59392+ security levels, you should say Y here. With this option, the
59393+ following features are enabled:
59394+
59395+ - Linking restrictions
59396+ - FIFO restrictions
59397+ - Restricted dmesg
59398+ - Enforced chdir("/") on chroot
59399+ - Runtime module disabling
59400+
59401+config GRKERNSEC_MEDIUM
59402+ bool "Medium"
59403+ select PAX
59404+ select PAX_EI_PAX
59405+ select PAX_PT_PAX_FLAGS
59406+ select PAX_HAVE_ACL_FLAGS
59407+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
59408+ select GRKERNSEC_CHROOT
59409+ select GRKERNSEC_CHROOT_SYSCTL
59410+ select GRKERNSEC_LINK
59411+ select GRKERNSEC_FIFO
59412+ select GRKERNSEC_DMESG
59413+ select GRKERNSEC_RANDNET
59414+ select GRKERNSEC_FORKFAIL
59415+ select GRKERNSEC_TIME
59416+ select GRKERNSEC_SIGNAL
59417+ select GRKERNSEC_CHROOT
59418+ select GRKERNSEC_CHROOT_UNIX
59419+ select GRKERNSEC_CHROOT_MOUNT
59420+ select GRKERNSEC_CHROOT_PIVOT
59421+ select GRKERNSEC_CHROOT_DOUBLE
59422+ select GRKERNSEC_CHROOT_CHDIR
59423+ select GRKERNSEC_CHROOT_MKNOD
59424+ select GRKERNSEC_PROC
59425+ select GRKERNSEC_PROC_USERGROUP
59426+ select PAX_RANDUSTACK
59427+ select PAX_ASLR
59428+ select PAX_RANDMMAP
59429+ select PAX_REFCOUNT if (X86 || SPARC64)
59430+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
59431+
59432+ help
59433+ If you say Y here, several features in addition to those included
59434+ in the low additional security level will be enabled. These
59435+ features provide even more security to your system, though in rare
59436+ cases they may be incompatible with very old or poorly written
59437+ software. If you enable this option, make sure that your auth
59438+ service (identd) is running as gid 1001. With this option,
59439+ the following features (in addition to those provided in the
59440+ low additional security level) will be enabled:
59441+
59442+ - Failed fork logging
59443+ - Time change logging
59444+ - Signal logging
59445+ - Deny mounts in chroot
59446+ - Deny double chrooting
59447+ - Deny sysctl writes in chroot
59448+ - Deny mknod in chroot
59449+ - Deny access to abstract AF_UNIX sockets out of chroot
59450+ - Deny pivot_root in chroot
59451+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
59452+ - /proc restrictions with special GID set to 10 (usually wheel)
59453+ - Address Space Layout Randomization (ASLR)
59454+ - Prevent exploitation of most refcount overflows
59455+ - Bounds checking of copying between the kernel and userland
59456+
59457+config GRKERNSEC_HIGH
59458+ bool "High"
59459+ select GRKERNSEC_LINK
59460+ select GRKERNSEC_FIFO
59461+ select GRKERNSEC_DMESG
59462+ select GRKERNSEC_FORKFAIL
59463+ select GRKERNSEC_TIME
59464+ select GRKERNSEC_SIGNAL
59465+ select GRKERNSEC_CHROOT
59466+ select GRKERNSEC_CHROOT_SHMAT
59467+ select GRKERNSEC_CHROOT_UNIX
59468+ select GRKERNSEC_CHROOT_MOUNT
59469+ select GRKERNSEC_CHROOT_FCHDIR
59470+ select GRKERNSEC_CHROOT_PIVOT
59471+ select GRKERNSEC_CHROOT_DOUBLE
59472+ select GRKERNSEC_CHROOT_CHDIR
59473+ select GRKERNSEC_CHROOT_MKNOD
59474+ select GRKERNSEC_CHROOT_CAPS
59475+ select GRKERNSEC_CHROOT_SYSCTL
59476+ select GRKERNSEC_CHROOT_FINDTASK
59477+ select GRKERNSEC_SYSFS_RESTRICT
59478+ select GRKERNSEC_PROC
59479+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
59480+ select GRKERNSEC_HIDESYM
59481+ select GRKERNSEC_BRUTE
59482+ select GRKERNSEC_PROC_USERGROUP
59483+ select GRKERNSEC_KMEM
59484+ select GRKERNSEC_RESLOG
59485+ select GRKERNSEC_RANDNET
59486+ select GRKERNSEC_PROC_ADD
59487+ select GRKERNSEC_CHROOT_CHMOD
59488+ select GRKERNSEC_CHROOT_NICE
59489+ select GRKERNSEC_AUDIT_MOUNT
59490+ select GRKERNSEC_MODHARDEN if (MODULES)
59491+ select GRKERNSEC_HARDEN_PTRACE
59492+ select GRKERNSEC_VM86 if (X86_32)
59493+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
59494+ select PAX
59495+ select PAX_RANDUSTACK
59496+ select PAX_ASLR
59497+ select PAX_RANDMMAP
59498+ select PAX_NOEXEC
59499+ select PAX_MPROTECT
59500+ select PAX_EI_PAX
59501+ select PAX_PT_PAX_FLAGS
59502+ select PAX_HAVE_ACL_FLAGS
59503+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
59504+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
59505+ select PAX_RANDKSTACK if (X86_TSC && X86)
59506+ select PAX_SEGMEXEC if (X86_32)
59507+ select PAX_PAGEEXEC
59508+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
59509+ select PAX_EMUTRAMP if (PARISC)
59510+ select PAX_EMUSIGRT if (PARISC)
59511+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
59512+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
59513+ select PAX_REFCOUNT if (X86 || SPARC64)
59514+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
59515+ help
59516+ If you say Y here, many of the features of grsecurity will be
59517+ enabled, which will protect you against many kinds of attacks
59518+ against your system. The heightened security comes at a cost
59519+ of an increased chance of incompatibilities with rare software
59520+ on your machine. Since this security level enables PaX, you should
59521+ view <http://pax.grsecurity.net> and read about the PaX
59522+ project. While you are there, download chpax and run it on
59523+ binaries that cause problems with PaX. Also remember that
59524+ since the /proc restrictions are enabled, you must run your
59525+ identd as gid 1001. This security level enables the following
59526+ features in addition to those listed in the low and medium
59527+ security levels:
59528+
59529+ - Additional /proc restrictions
59530+ - Chmod restrictions in chroot
59531+ - No signals, ptrace, or viewing of processes outside of chroot
59532+ - Capability restrictions in chroot
59533+ - Deny fchdir out of chroot
59534+ - Priority restrictions in chroot
59535+ - Segmentation-based implementation of PaX
59536+ - Mprotect restrictions
59537+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
59538+ - Kernel stack randomization
59539+ - Mount/unmount/remount logging
59540+ - Kernel symbol hiding
59541+ - Hardening of module auto-loading
59542+ - Ptrace restrictions
59543+ - Restricted vm86 mode
59544+ - Restricted sysfs/debugfs
59545+ - Active kernel exploit response
59546+
59547+config GRKERNSEC_CUSTOM
59548+ bool "Custom"
59549+ help
59550+ If you say Y here, you will be able to configure every grsecurity
59551+ option, which allows you to enable many more features that aren't
59552+ covered in the basic security levels. These additional features
59553+ include TPE, socket restrictions, and the sysctl system for
59554+ grsecurity. It is advised that you read through the help for
59555+ each option to determine its usefulness in your situation.
59556+
59557+endchoice
59558+
59559+menu "Address Space Protection"
59560+depends on GRKERNSEC
59561+
59562+config GRKERNSEC_KMEM
59563+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
59564+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
59565+ help
59566+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
59567+ be written to or read from to modify or leak the contents of the running
59568+ kernel. /dev/port will also not be allowed to be opened. If you have module
59569+ support disabled, enabling this will close up four ways that are
59570+ currently used to insert malicious code into the running kernel.
59571+ Even with all these features enabled, we still highly recommend that
59572+ you use the RBAC system, as it is still possible for an attacker to
59573+ modify the running kernel through privileged I/O granted by ioperm/iopl.
59574+ If you are not using XFree86, you may be able to stop this additional
59575+ case by enabling the 'Disable privileged I/O' option. Though nothing
59576+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
59577+ but only to video memory, which is the only writing we allow in this
59578+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
59579+ not be allowed to mprotect it with PROT_WRITE later.
59580+ It is highly recommended that you say Y here if you meet all the
59581+ conditions above.
59582+
59583+config GRKERNSEC_VM86
59584+ bool "Restrict VM86 mode"
59585+ depends on X86_32
59586+
59587+ help
59588+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
59589+ make use of a special execution mode on 32bit x86 processors called
59590+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
59591+ video cards and will still work with this option enabled. The purpose
59592+ of the option is to prevent exploitation of emulation errors in
59593+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
59594+ Nearly all users should be able to enable this option.
59595+
59596+config GRKERNSEC_IO
59597+ bool "Disable privileged I/O"
59598+ depends on X86
59599+ select RTC_CLASS
59600+ select RTC_INTF_DEV
59601+ select RTC_DRV_CMOS
59602+
59603+ help
59604+ If you say Y here, all ioperm and iopl calls will return an error.
59605+ Ioperm and iopl can be used to modify the running kernel.
59606+ Unfortunately, some programs need this access to operate properly,
59607+ the most notable of which are XFree86 and hwclock. hwclock can be
59608+ remedied by having RTC support in the kernel, so real-time
59609+ clock support is enabled if this option is enabled, to ensure
59610+ that hwclock operates correctly. XFree86 still will not
59611+ operate correctly with this option enabled, so DO NOT CHOOSE Y
59612+ IF YOU USE XFree86. If you use XFree86 and you still want to
59613+ protect your kernel against modification, use the RBAC system.
59614+
59615+config GRKERNSEC_PROC_MEMMAP
59616+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
59617+ default y if (PAX_NOEXEC || PAX_ASLR)
59618+ depends on PAX_NOEXEC || PAX_ASLR
59619+ help
59620+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
59621+	  give no information about the addresses of a task's mappings if
59622+	  PaX features that rely on random addresses are enabled for that task.
59623+	  If you use PaX, it is strongly recommended that you say Y here, as it
59624+	  closes a hole that would otherwise make full ASLR useless for suid
59625+	  binaries.
59626+
59627+config GRKERNSEC_BRUTE
59628+ bool "Deter exploit bruteforcing"
59629+ help
59630+ If you say Y here, attempts to bruteforce exploits against forking
59631+	  daemons such as apache or sshd, as well as against suid/sgid binaries,
59632+ will be deterred. When a child of a forking daemon is killed by PaX
59633+ or crashes due to an illegal instruction or other suspicious signal,
59634+ the parent process will be delayed 30 seconds upon every subsequent
59635+ fork until the administrator is able to assess the situation and
59636+ restart the daemon.
59637+ In the suid/sgid case, the attempt is logged, the user has all their
59638+ processes terminated, and they are prevented from executing any further
59639+ processes for 15 minutes.
59640+ It is recommended that you also enable signal logging in the auditing
59641+ section so that logs are generated when a process triggers a suspicious
59642+ signal.
59643+ If the sysctl option is enabled, a sysctl option with name
59644+ "deter_bruteforce" is created.
59645+
59646+config GRKERNSEC_MODHARDEN
59647+ bool "Harden module auto-loading"
59648+ depends on MODULES
59649+ help
59650+ If you say Y here, module auto-loading in response to use of some
59651+ feature implemented by an unloaded module will be restricted to
59652+ root users. Enabling this option helps defend against attacks
59653+ by unprivileged users who abuse the auto-loading behavior to
59654+ cause a vulnerable module to load that is then exploited.
59655+
59656+ If this option prevents a legitimate use of auto-loading for a
59657+ non-root user, the administrator can execute modprobe manually
59658+ with the exact name of the module mentioned in the alert log.
59659+ Alternatively, the administrator can add the module to the list
59660+ of modules loaded at boot by modifying init scripts.
59661+
59662+ Modification of init scripts will most likely be needed on
59663+ Ubuntu servers with encrypted home directory support enabled,
59664+ as the first non-root user logging in will cause the ecb(aes),
59665+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
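+	  As a rough sketch of the two workarounds above (using the cbc(aes)
+	  module mentioned for the Ubuntu case; substitute the exact name
+	  printed in your alert log), the administrator could run:
+
+	    # modprobe "cbc(aes)"              # one-off manual load
+	    # echo "cbc(aes)" >> /etc/modules  # Ubuntu/Debian: load at every boot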
59666+
59667+config GRKERNSEC_HIDESYM
59668+ bool "Hide kernel symbols"
59669+ help
59670+ If you say Y here, getting information on loaded modules, and
59671+ displaying all kernel symbols through a syscall will be restricted
59672+ to users with CAP_SYS_MODULE. For software compatibility reasons,
59673+ /proc/kallsyms will be restricted to the root user. The RBAC
59674+ system can hide that entry even from root.
59675+
59676+ This option also prevents leaking of kernel addresses through
59677+ several /proc entries.
59678+
59679+ Note that this option is only effective provided the following
59680+ conditions are met:
59681+ 1) The kernel using grsecurity is not precompiled by some distribution
59682+ 2) You have also enabled GRKERNSEC_DMESG
59683+ 3) You are using the RBAC system and hiding other files such as your
59684+ kernel image and System.map. Alternatively, enabling this option
59685+ causes the permissions on /boot, /lib/modules, and the kernel
59686+ source directory to change at compile time to prevent
59687+ reading by non-root users.
59688+ If the above conditions are met, this option will aid in providing a
59689+ useful protection against local kernel exploitation of overflows
59690+ and arbitrary read/write vulnerabilities.
59691+
59692+config GRKERNSEC_KERN_LOCKOUT
59693+ bool "Active kernel exploit response"
59694+ depends on X86 || ARM || PPC || SPARC
59695+ help
59696+ If you say Y here, when a PaX alert is triggered due to suspicious
59697+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
59698+ or an OOPs occurs due to bad memory accesses, instead of just
59699+ terminating the offending process (and potentially allowing
59700+ a subsequent exploit from the same user), we will take one of two
59701+ actions:
59702+ If the user was root, we will panic the system
59703+ If the user was non-root, we will log the attempt, terminate
59704+ all processes owned by the user, then prevent them from creating
59705+ any new processes until the system is restarted
59706+ This deters repeated kernel exploitation/bruteforcing attempts
59707+ and is useful for later forensics.
59708+
59709+endmenu
59710+menu "Role Based Access Control Options"
59711+depends on GRKERNSEC
59712+
59713+config GRKERNSEC_RBAC_DEBUG
59714+ bool
59715+
59716+config GRKERNSEC_NO_RBAC
59717+ bool "Disable RBAC system"
59718+ help
59719+ If you say Y here, the /dev/grsec device will be removed from the kernel,
59720+ preventing the RBAC system from being enabled. You should only say Y
59721+ here if you have no intention of using the RBAC system, so as to prevent
59722+ an attacker with root access from misusing the RBAC system to hide files
59723+ and processes when loadable module support and /dev/[k]mem have been
59724+ locked down.
59725+
59726+config GRKERNSEC_ACL_HIDEKERN
59727+ bool "Hide kernel processes"
59728+ help
59729+ If you say Y here, all kernel threads will be hidden to all
59730+ processes but those whose subject has the "view hidden processes"
59731+ flag.
59732+
59733+config GRKERNSEC_ACL_MAXTRIES
59734+ int "Maximum tries before password lockout"
59735+ default 3
59736+ help
59737+ This option enforces the maximum number of times a user can attempt
59738+ to authorize themselves with the grsecurity RBAC system before being
59739+ denied the ability to attempt authorization again for a specified time.
59740+ The lower the number, the harder it will be to brute-force a password.
59741+
59742+config GRKERNSEC_ACL_TIMEOUT
59743+ int "Time to wait after max password tries, in seconds"
59744+ default 30
59745+ help
59746+ This option specifies the time the user must wait after attempting to
59747+ authorize to the RBAC system with the maximum number of invalid
59748+ passwords. The higher the number, the harder it will be to brute-force
59749+ a password.
59750+
59751+endmenu
59752+menu "Filesystem Protections"
59753+depends on GRKERNSEC
59754+
59755+config GRKERNSEC_PROC
59756+ bool "Proc restrictions"
59757+ help
59758+ If you say Y here, the permissions of the /proc filesystem
59759+ will be altered to enhance system security and privacy. You MUST
59760+ choose either a user only restriction or a user and group restriction.
59761+ Depending upon the option you choose, you can either restrict users to
59762+ see only the processes they themselves run, or choose a group that can
59763+ view all processes and files normally restricted to root if you choose
59764+ the "restrict to user only" option. NOTE: If you're running identd as
59765+ a non-root user, you will have to run it as the group you specify here.
59766+
59767+config GRKERNSEC_PROC_USER
59768+ bool "Restrict /proc to user only"
59769+ depends on GRKERNSEC_PROC
59770+ help
59771+	  If you say Y here, non-root users will only be able to view their own
59772+	  processes, and will be restricted from viewing network-related
59773+	  information and kernel symbol and module information.
59774+
59775+config GRKERNSEC_PROC_USERGROUP
59776+ bool "Allow special group"
59777+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
59778+ help
59779+ If you say Y here, you will be able to select a group that will be
59780+ able to view all processes and network-related information. If you've
59781+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
59782+ remain hidden. This option is useful if you want to run identd as
59783+ a non-root user.
59784+
59785+config GRKERNSEC_PROC_GID
59786+ int "GID for special group"
59787+ depends on GRKERNSEC_PROC_USERGROUP
59788+ default 1001
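+	  A minimal sketch of wiring this up (the group name and the identd
+	  user are assumptions; only the GID must match the value chosen here):
+
+	    # groupadd -g 1001 procview
+	    # usermod -a -G procview identd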
59789+
59790+config GRKERNSEC_PROC_ADD
59791+ bool "Additional restrictions"
59792+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
59793+ help
59794+ If you say Y here, additional restrictions will be placed on
59795+ /proc that keep normal users from viewing device information and
59796+ slabinfo information that could be useful for exploits.
59797+
59798+config GRKERNSEC_LINK
59799+ bool "Linking restrictions"
59800+ help
59801+ If you say Y here, /tmp race exploits will be prevented, since users
59802+ will no longer be able to follow symlinks owned by other users in
59803+ world-writable +t directories (e.g. /tmp), unless the owner of the
59804+	  symlink is the owner of the directory. Users will also not be
59805+ able to hardlink to files they do not own. If the sysctl option is
59806+ enabled, a sysctl option with name "linking_restrictions" is created.
59807+
59808+config GRKERNSEC_FIFO
59809+ bool "FIFO restrictions"
59810+ help
59811+ If you say Y here, users will not be able to write to FIFOs they don't
59812+ own in world-writable +t directories (e.g. /tmp), unless the owner of
59813+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
59814+ option is enabled, a sysctl option with name "fifo_restrictions" is
59815+ created.
59816+
59817+config GRKERNSEC_SYSFS_RESTRICT
59818+ bool "Sysfs/debugfs restriction"
59819+ depends on SYSFS
59820+ help
59821+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
59822+ any filesystem normally mounted under it (e.g. debugfs) will only
59823+ be accessible by root. These filesystems generally provide access
59824+ to hardware and debug information that isn't appropriate for unprivileged
59825+ users of the system. Sysfs and debugfs have also become a large source
59826+ of new vulnerabilities, ranging from infoleaks to local compromise.
59827+ There has been very little oversight with an eye toward security involved
59828+ in adding new exporters of information to these filesystems, so their
59829+ use is discouraged.
59830+ This option is equivalent to a chmod 0700 of the mount paths.
59831+
59832+config GRKERNSEC_ROFS
59833+ bool "Runtime read-only mount protection"
59834+ help
59835+ If you say Y here, a sysctl option with name "romount_protect" will
59836+ be created. By setting this option to 1 at runtime, filesystems
59837+ will be protected in the following ways:
59838+ * No new writable mounts will be allowed
59839+ * Existing read-only mounts won't be able to be remounted read/write
59840+ * Write operations will be denied on all block devices
59841+ This option acts independently of grsec_lock: once it is set to 1,
59842+ it cannot be turned off. Therefore, please be mindful of the resulting
59843+ behavior if this option is enabled in an init script on a read-only
59844+ filesystem. This feature is mainly intended for secure embedded systems.
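+	  Assuming the sysctl interface is enabled, a boot script on such a
+	  system might turn the protection on once all writable mounts are in
+	  place (a sketch; the path follows the grsecurity sysctl convention
+	  described in the "Sysctl support" help):
+
+	    # echo 1 > /proc/sys/kernel/grsecurity/romount_protect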
59845+
59846+config GRKERNSEC_CHROOT
59847+ bool "Chroot jail restrictions"
59848+ help
59849+ If you say Y here, you will be able to choose several options that will
59850+ make breaking out of a chrooted jail much more difficult. If you
59851+ encounter no software incompatibilities with the following options, it
59852+ is recommended that you enable each one.
59853+
59854+config GRKERNSEC_CHROOT_MOUNT
59855+ bool "Deny mounts"
59856+ depends on GRKERNSEC_CHROOT
59857+ help
59858+ If you say Y here, processes inside a chroot will not be able to
59859+ mount or remount filesystems. If the sysctl option is enabled, a
59860+ sysctl option with name "chroot_deny_mount" is created.
59861+
59862+config GRKERNSEC_CHROOT_DOUBLE
59863+ bool "Deny double-chroots"
59864+ depends on GRKERNSEC_CHROOT
59865+ help
59866+ If you say Y here, processes inside a chroot will not be able to chroot
59867+ again outside the chroot. This is a widely used method of breaking
59868+ out of a chroot jail and should not be allowed. If the sysctl
59869+ option is enabled, a sysctl option with name
59870+ "chroot_deny_chroot" is created.
59871+
59872+config GRKERNSEC_CHROOT_PIVOT
59873+ bool "Deny pivot_root in chroot"
59874+ depends on GRKERNSEC_CHROOT
59875+ help
59876+ If you say Y here, processes inside a chroot will not be able to use
59877+ a function called pivot_root() that was introduced in Linux 2.3.41. It
59878+ works similar to chroot in that it changes the root filesystem. This
59879+	  works similarly to chroot in that it changes the root filesystem. This
59880+ of the chroot, and therefore should not be allowed. If the sysctl
59881+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
59882+ created.
59883+
59884+config GRKERNSEC_CHROOT_CHDIR
59885+ bool "Enforce chdir(\"/\") on all chroots"
59886+ depends on GRKERNSEC_CHROOT
59887+ help
59888+ If you say Y here, the current working directory of all newly-chrooted
59889+	  applications will be set to the root directory of the chroot.
59890+ The man page on chroot(2) states:
59891+ Note that this call does not change the current working
59892+ directory, so that `.' can be outside the tree rooted at
59893+ `/'. In particular, the super-user can escape from a
59894+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
59895+
59896+ It is recommended that you say Y here, since it's not known to break
59897+ any software. If the sysctl option is enabled, a sysctl option with
59898+ name "chroot_enforce_chdir" is created.
59899+
59900+config GRKERNSEC_CHROOT_CHMOD
59901+ bool "Deny (f)chmod +s"
59902+ depends on GRKERNSEC_CHROOT
59903+ help
59904+ If you say Y here, processes inside a chroot will not be able to chmod
59905+ or fchmod files to make them have suid or sgid bits. This protects
59906+ against another published method of breaking a chroot. If the sysctl
59907+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
59908+ created.
59909+
59910+config GRKERNSEC_CHROOT_FCHDIR
59911+ bool "Deny fchdir out of chroot"
59912+ depends on GRKERNSEC_CHROOT
59913+ help
59914+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
59915+ to a file descriptor of the chrooting process that points to a directory
59916+ outside the filesystem will be stopped. If the sysctl option
59917+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
59918+
59919+config GRKERNSEC_CHROOT_MKNOD
59920+ bool "Deny mknod"
59921+ depends on GRKERNSEC_CHROOT
59922+ help
59923+ If you say Y here, processes inside a chroot will not be allowed to
59924+ mknod. The problem with using mknod inside a chroot is that it
59925+ would allow an attacker to create a device entry that is the same
59926+ as one on the physical root of your system, which could range from
59927+	  as one on the physical root of your system, which could be anything
59928+	  from the console device to a device for your hard drive (which
59929+ that you say Y here, unless you run into software incompatibilities.
59930+ If the sysctl option is enabled, a sysctl option with name
59931+ "chroot_deny_mknod" is created.
59932+
59933+config GRKERNSEC_CHROOT_SHMAT
59934+ bool "Deny shmat() out of chroot"
59935+ depends on GRKERNSEC_CHROOT
59936+ help
59937+ If you say Y here, processes inside a chroot will not be able to attach
59938+ to shared memory segments that were created outside of the chroot jail.
59939+ It is recommended that you say Y here. If the sysctl option is enabled,
59940+ a sysctl option with name "chroot_deny_shmat" is created.
59941+
59942+config GRKERNSEC_CHROOT_UNIX
59943+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
59944+ depends on GRKERNSEC_CHROOT
59945+ help
59946+ If you say Y here, processes inside a chroot will not be able to
59947+ connect to abstract (meaning not belonging to a filesystem) Unix
59948+ domain sockets that were bound outside of a chroot. It is recommended
59949+ that you say Y here. If the sysctl option is enabled, a sysctl option
59950+ with name "chroot_deny_unix" is created.
59951+
59952+config GRKERNSEC_CHROOT_FINDTASK
59953+ bool "Protect outside processes"
59954+ depends on GRKERNSEC_CHROOT
59955+ help
59956+ If you say Y here, processes inside a chroot will not be able to
59957+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
59958+ getsid, or view any process outside of the chroot. If the sysctl
59959+ option is enabled, a sysctl option with name "chroot_findtask" is
59960+ created.
59961+
59962+config GRKERNSEC_CHROOT_NICE
59963+ bool "Restrict priority changes"
59964+ depends on GRKERNSEC_CHROOT
59965+ help
59966+ If you say Y here, processes inside a chroot will not be able to raise
59967+ the priority of processes in the chroot, or alter the priority of
59968+ processes outside the chroot. This provides more security than simply
59969+ removing CAP_SYS_NICE from the process' capability set. If the
59970+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
59971+ is created.
59972+
59973+config GRKERNSEC_CHROOT_SYSCTL
59974+ bool "Deny sysctl writes"
59975+ depends on GRKERNSEC_CHROOT
59976+ help
59977+ If you say Y here, an attacker in a chroot will not be able to
59978+ write to sysctl entries, either by sysctl(2) or through a /proc
59979+ interface. It is strongly recommended that you say Y here. If the
59980+ sysctl option is enabled, a sysctl option with name
59981+ "chroot_deny_sysctl" is created.
59982+
59983+config GRKERNSEC_CHROOT_CAPS
59984+ bool "Capability restrictions"
59985+ depends on GRKERNSEC_CHROOT
59986+ help
59987+ If you say Y here, the capabilities on all processes within a
59988+	  chroot jail will be lowered to stop module insertion, raw I/O,
59989+ system and net admin tasks, rebooting the system, modifying immutable
59990+ files, modifying IPC owned by another, and changing the system time.
59991+	  This is left as an option because it can break some apps. Disable this
59992+ if your chrooted apps are having problems performing those kinds of
59993+ tasks. If the sysctl option is enabled, a sysctl option with
59994+ name "chroot_caps" is created.
59995+
59996+endmenu
59997+menu "Kernel Auditing"
59998+depends on GRKERNSEC
59999+
60000+config GRKERNSEC_AUDIT_GROUP
60001+ bool "Single group for auditing"
60002+ help
60003+ If you say Y here, the exec, chdir, and (un)mount logging features
60004+ will only operate on a group you specify. This option is recommended
60005+ if you only want to watch certain users instead of having a large
60006+ amount of logs from the entire system. If the sysctl option is enabled,
60007+ a sysctl option with name "audit_group" is created.
60008+
60009+config GRKERNSEC_AUDIT_GID
60010+ int "GID for auditing"
60011+ depends on GRKERNSEC_AUDIT_GROUP
60012+ default 1007
60013+
60014+config GRKERNSEC_EXECLOG
60015+ bool "Exec logging"
60016+ help
60017+ If you say Y here, all execve() calls will be logged (since the
60018+ other exec*() calls are frontends to execve(), all execution
60019+ will be logged). Useful for shell-servers that like to keep track
60020+ of their users. If the sysctl option is enabled, a sysctl option with
60021+ name "exec_logging" is created.
60022+ WARNING: This option when enabled will produce a LOT of logs, especially
60023+ on an active system.
60024+
60025+config GRKERNSEC_RESLOG
60026+ bool "Resource logging"
60027+ help
60028+ If you say Y here, all attempts to overstep resource limits will
60029+ be logged with the resource name, the requested size, and the current
60030+ limit. It is highly recommended that you say Y here. If the sysctl
60031+ option is enabled, a sysctl option with name "resource_logging" is
60032+ created. If the RBAC system is enabled, the sysctl value is ignored.
60033+
60034+config GRKERNSEC_CHROOT_EXECLOG
60035+ bool "Log execs within chroot"
60036+ help
60037+ If you say Y here, all executions inside a chroot jail will be logged
60038+ to syslog. This can cause a large amount of logs if certain
60039+ applications (eg. djb's daemontools) are installed on the system, and
60040+ is therefore left as an option. If the sysctl option is enabled, a
60041+ sysctl option with name "chroot_execlog" is created.
60042+
60043+config GRKERNSEC_AUDIT_PTRACE
60044+ bool "Ptrace logging"
60045+ help
60046+ If you say Y here, all attempts to attach to a process via ptrace
60047+ will be logged. If the sysctl option is enabled, a sysctl option
60048+ with name "audit_ptrace" is created.
60049+
60050+config GRKERNSEC_AUDIT_CHDIR
60051+ bool "Chdir logging"
60052+ help
60053+ If you say Y here, all chdir() calls will be logged. If the sysctl
60054+ option is enabled, a sysctl option with name "audit_chdir" is created.
60055+
60056+config GRKERNSEC_AUDIT_MOUNT
60057+ bool "(Un)Mount logging"
60058+ help
60059+ If you say Y here, all mounts and unmounts will be logged. If the
60060+ sysctl option is enabled, a sysctl option with name "audit_mount" is
60061+ created.
60062+
60063+config GRKERNSEC_SIGNAL
60064+ bool "Signal logging"
60065+ help
60066+ If you say Y here, certain important signals will be logged, such as
60067+	  SIGSEGV, which will, as a result, inform you when an error has occurred
60068+	  in a program, which in some cases could indicate a possible exploit attempt.
60069+ If the sysctl option is enabled, a sysctl option with name
60070+ "signal_logging" is created.
60071+
60072+config GRKERNSEC_FORKFAIL
60073+ bool "Fork failure logging"
60074+ help
60075+ If you say Y here, all failed fork() attempts will be logged.
60076+ This could suggest a fork bomb, or someone attempting to overstep
60077+ their process limit. If the sysctl option is enabled, a sysctl option
60078+ with name "forkfail_logging" is created.
60079+
60080+config GRKERNSEC_TIME
60081+ bool "Time change logging"
60082+ help
60083+ If you say Y here, any changes of the system clock will be logged.
60084+ If the sysctl option is enabled, a sysctl option with name
60085+ "timechange_logging" is created.
60086+
60087+config GRKERNSEC_PROC_IPADDR
60088+ bool "/proc/<pid>/ipaddr support"
60089+ help
60090+ If you say Y here, a new entry will be added to each /proc/<pid>
60091+ directory that contains the IP address of the person using the task.
60092+ The IP is carried across local TCP and AF_UNIX stream sockets.
60093+ This information can be useful for IDS/IPSes to perform remote response
60094+ to a local attack. The entry is readable by only the owner of the
60095+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
60096+ the RBAC system), and thus does not create privacy concerns.
60097+
60098+config GRKERNSEC_RWXMAP_LOG
60099+ bool 'Denied RWX mmap/mprotect logging'
60100+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
60101+ help
60102+ If you say Y here, calls to mmap() and mprotect() with explicit
60103+ usage of PROT_WRITE and PROT_EXEC together will be logged when
60104+ denied by the PAX_MPROTECT feature. If the sysctl option is
60105+ enabled, a sysctl option with name "rwxmap_logging" is created.
60106+
60107+config GRKERNSEC_AUDIT_TEXTREL
60108+ bool 'ELF text relocations logging (READ HELP)'
60109+ depends on PAX_MPROTECT
60110+ help
60111+ If you say Y here, text relocations will be logged with the filename
60112+ of the offending library or binary. The purpose of the feature is
60113+ to help Linux distribution developers get rid of libraries and
60114+ binaries that need text relocations which hinder the future progress
60115+ of PaX. Only Linux distribution developers should say Y here, and
60116+ never on a production machine, as this option creates an information
60117+ leak that could aid an attacker in defeating the randomization of
60118+ a single memory region. If the sysctl option is enabled, a sysctl
60119+ option with name "audit_textrel" is created.
60120+
60121+endmenu
60122+
60123+menu "Executable Protections"
60124+depends on GRKERNSEC
60125+
60126+config GRKERNSEC_DMESG
60127+ bool "Dmesg(8) restriction"
60128+ help
60129+ If you say Y here, non-root users will not be able to use dmesg(8)
60130+	  to view up to the last 4KB of messages in the kernel's log buffer.
60131+ The kernel's log buffer often contains kernel addresses and other
60132+ identifying information useful to an attacker in fingerprinting a
60133+ system for a targeted exploit.
60134+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
60135+ created.
60136+
60137+config GRKERNSEC_HARDEN_PTRACE
60138+ bool "Deter ptrace-based process snooping"
60139+ help
60140+ If you say Y here, TTY sniffers and other malicious monitoring
60141+ programs implemented through ptrace will be defeated. If you
60142+ have been using the RBAC system, this option has already been
60143+ enabled for several years for all users, with the ability to make
60144+ fine-grained exceptions.
60145+
60146+ This option only affects the ability of non-root users to ptrace
60147+	  processes that are not a descendant of the ptracing process.
60148+ This means that strace ./binary and gdb ./binary will still work,
60149+ but attaching to arbitrary processes will not. If the sysctl
60150+ option is enabled, a sysctl option with name "harden_ptrace" is
60151+ created.
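+	  A small sketch of the resulting behavior for an unprivileged user
+	  (the PID shown is hypothetical):
+
+	    $ strace ./binary   # still works: the target is a descendant
+	    $ gdb -p 4242       # attaching to an unrelated PID is denied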
60152+
60153+config GRKERNSEC_TPE
60154+ bool "Trusted Path Execution (TPE)"
60155+ help
60156+ If you say Y here, you will be able to choose a gid to add to the
60157+ supplementary groups of users you want to mark as "untrusted."
60158+ These users will not be able to execute any files that are not in
60159+ root-owned directories writable only by root. If the sysctl option
60160+ is enabled, a sysctl option with name "tpe" is created.
60161+
60162+config GRKERNSEC_TPE_ALL
60163+ bool "Partially restrict all non-root users"
60164+ depends on GRKERNSEC_TPE
60165+ help
60166+ If you say Y here, all non-root users will be covered under
60167+ a weaker TPE restriction. This is separate from, and in addition to,
60168+ the main TPE options that you have selected elsewhere. Thus, if a
60169+ "trusted" GID is chosen, this restriction applies to even that GID.
60170+ Under this restriction, all non-root users will only be allowed to
60171+ execute files in directories they own that are not group or
60172+ world-writable, or in directories owned by root and writable only by
60173+ root. If the sysctl option is enabled, a sysctl option with name
60174+ "tpe_restrict_all" is created.
60175+
60176+config GRKERNSEC_TPE_INVERT
60177+ bool "Invert GID option"
60178+ depends on GRKERNSEC_TPE
60179+ help
60180+ If you say Y here, the group you specify in the TPE configuration will
60181+ decide what group TPE restrictions will be *disabled* for. This
60182+ option is useful if you want TPE restrictions to be applied to most
60183+ users on the system. If the sysctl option is enabled, a sysctl option
60184+ with name "tpe_invert" is created. Unlike other sysctl options, this
60185+ entry will default to on for backward-compatibility.
60186+
60187+config GRKERNSEC_TPE_GID
60188+ int "GID for untrusted users"
60189+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
60190+ default 1005
60191+ help
60192+ Setting this GID determines what group TPE restrictions will be
60193+ *enabled* for. If the sysctl option is enabled, a sysctl option
60194+ with name "tpe_gid" is created.
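+	  A minimal sketch of marking a user as untrusted (the group name is an
+	  assumption; only the GID must match the value configured here):
+
+	    # groupadd -g 1005 untrusted
+	    # usermod -a -G untrusted someuser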
60195+
60196+config GRKERNSEC_TPE_GID
60197+ int "GID for trusted users"
60198+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
60199+ default 1005
60200+ help
60201+ Setting this GID determines what group TPE restrictions will be
60202+ *disabled* for. If the sysctl option is enabled, a sysctl option
60203+ with name "tpe_gid" is created.
60204+
60205+endmenu
60206+menu "Network Protections"
60207+depends on GRKERNSEC
60208+
60209+config GRKERNSEC_RANDNET
60210+ bool "Larger entropy pools"
60211+ help
60212+ If you say Y here, the entropy pools used for many features of Linux
60213+ and grsecurity will be doubled in size. Since several grsecurity
60214+ features use additional randomness, it is recommended that you say Y
60215+ here. Saying Y here has a similar effect as modifying
60216+ /proc/sys/kernel/random/poolsize.
60217+
60218+config GRKERNSEC_BLACKHOLE
60219+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
60220+ depends on NET
60221+ help
60222+ If you say Y here, neither TCP resets nor ICMP
60223+ destination-unreachable packets will be sent in response to packets
60224+ sent to ports for which no associated listening process exists.
60225+	  This feature supports both IPv4 and IPv6 and exempts the
60226+ loopback interface from blackholing. Enabling this feature
60227+ makes a host more resilient to DoS attacks and reduces network
60228+ visibility against scanners.
60229+
60230+ The blackhole feature as-implemented is equivalent to the FreeBSD
60231+ blackhole feature, as it prevents RST responses to all packets, not
60232+ just SYNs. Under most application behavior this causes no
60233+ problems, but applications (like haproxy) may not close certain
60234+ connections in a way that cleanly terminates them on the remote
60235+ end, leaving the remote host in LAST_ACK state. Because of this
60236+ side-effect and to prevent intentional LAST_ACK DoSes, this
60237+ feature also adds automatic mitigation against such attacks.
60238+ The mitigation drastically reduces the amount of time a socket
60239+ can spend in LAST_ACK state. If you're using haproxy and not
60240+ all servers it connects to have this option enabled, consider
60241+ disabling this feature on the haproxy host.
60242+
60243+ If the sysctl option is enabled, two sysctl options with names
60244+ "ip_blackhole" and "lastack_retries" will be created.
60245+ While "ip_blackhole" takes the standard zero/non-zero on/off
60246+ toggle, "lastack_retries" uses the same kinds of values as
60247+ "tcp_retries1" and "tcp_retries2". The default value of 4
60248+ prevents a socket from lasting more than 45 seconds in LAST_ACK
60249+ state.
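+	  With the sysctl option enabled, the two runtime knobs described above
+	  can be adjusted like this (a sketch; the values shown are the
+	  documented defaults and the path follows the grsecurity sysctl
+	  convention):
+
+	    # echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole
+	    # echo 4 > /proc/sys/kernel/grsecurity/lastack_retries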
60250+
60251+config GRKERNSEC_SOCKET
60252+ bool "Socket restrictions"
60253+ depends on NET
60254+ help
60255+ If you say Y here, you will be able to choose from several options.
60256+ If you assign a GID on your system and add it to the supplementary
60257+ groups of users you want to restrict socket access to, this patch
60258+ will perform up to three things, based on the option(s) you choose.
60259+
60260+config GRKERNSEC_SOCKET_ALL
60261+ bool "Deny any sockets to group"
60262+ depends on GRKERNSEC_SOCKET
60263+ help
60264+	  If you say Y here, you will be able to choose a GID whose users will
60265+ be unable to connect to other hosts from your machine or run server
60266+ applications from your machine. If the sysctl option is enabled, a
60267+ sysctl option with name "socket_all" is created.
60268+
60269+config GRKERNSEC_SOCKET_ALL_GID
60270+ int "GID to deny all sockets for"
60271+ depends on GRKERNSEC_SOCKET_ALL
60272+ default 1004
60273+ help
60274+ Here you can choose the GID to disable socket access for. Remember to
60275+ add the users you want socket access disabled for to the GID
60276+ specified here. If the sysctl option is enabled, a sysctl option
60277+ with name "socket_all_gid" is created.
60278+
60279+config GRKERNSEC_SOCKET_CLIENT
60280+ bool "Deny client sockets to group"
60281+ depends on GRKERNSEC_SOCKET
60282+ help
60283+	  If you say Y here, you will be able to choose a GID whose users will
60284+ be unable to connect to other hosts from your machine, but will be
60285+ able to run servers. If this option is enabled, all users in the group
60286+ you specify will have to use passive mode when initiating ftp transfers
60287+ from the shell on your machine. If the sysctl option is enabled, a
60288+ sysctl option with name "socket_client" is created.
60289+
60290+config GRKERNSEC_SOCKET_CLIENT_GID
60291+ int "GID to deny client sockets for"
60292+ depends on GRKERNSEC_SOCKET_CLIENT
60293+ default 1003
60294+ help
60295+ Here you can choose the GID to disable client socket access for.
60296+ Remember to add the users you want client socket access disabled for to
60297+ the GID specified here. If the sysctl option is enabled, a sysctl
60298+ option with name "socket_client_gid" is created.
60299+
60300+config GRKERNSEC_SOCKET_SERVER
60301+ bool "Deny server sockets to group"
60302+ depends on GRKERNSEC_SOCKET
60303+ help
60304+	  If you say Y here, you will be able to choose a GID whose users will
60305+ be unable to run server applications from your machine. If the sysctl
60306+ option is enabled, a sysctl option with name "socket_server" is created.
60307+
60308+config GRKERNSEC_SOCKET_SERVER_GID
60309+ int "GID to deny server sockets for"
60310+ depends on GRKERNSEC_SOCKET_SERVER
60311+ default 1002
60312+ help
60313+ Here you can choose the GID to disable server socket access for.
60314+ Remember to add the users you want server socket access disabled for to
60315+ the GID specified here. If the sysctl option is enabled, a sysctl
60316+ option with name "socket_server_gid" is created.
60317+
60318+endmenu
60319+menu "Sysctl support"
60320+depends on GRKERNSEC && SYSCTL
60321+
60322+config GRKERNSEC_SYSCTL
60323+ bool "Sysctl support"
60324+ help
60325+ If you say Y here, you will be able to change the options that
60326+ grsecurity runs with at bootup, without having to recompile your
60327+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
60328+ to enable (1) or disable (0) various features. All the sysctl entries
60329+ are mutable until the "grsec_lock" entry is set to a non-zero value.
60330+ All features enabled in the kernel configuration are disabled at boot
60331+ if you do not say Y to the "Turn on features by default" option.
60332+ All options should be set at startup, and the grsec_lock entry should
60333+ be set to a non-zero value after all the options are set.
60334+ *THIS IS EXTREMELY IMPORTANT*
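+	  As a short sketch of the startup sequence described above (the two
+	  feature names are examples of entries documented elsewhere in this
+	  file; enable whichever features you built in, then lock last):
+
+	    # echo 1 > /proc/sys/kernel/grsecurity/deter_bruteforce
+	    # echo 1 > /proc/sys/kernel/grsecurity/chroot_deny_mount
+	    # echo 1 > /proc/sys/kernel/grsecurity/grsec_lock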
60335+
60336+config GRKERNSEC_SYSCTL_DISTRO
60337+ bool "Extra sysctl support for distro makers (READ HELP)"
60338+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
60339+ help
60340+ If you say Y here, additional sysctl options will be created
60341+ for features that affect processes running as root. Therefore,
60342+ it is critical when using this option that the grsec_lock entry be
60343+	  enabled after boot. Only distros that ship prebuilt kernel packages
60344+	  with this option enabled, and that can ensure grsec_lock is enabled
60345+	  after boot, should use this option.
60346+ *Failure to set grsec_lock after boot makes all grsec features
60347+ this option covers useless*
60348+
60349+ Currently this option creates the following sysctl entries:
60350+ "Disable Privileged I/O": "disable_priv_io"
60351+
60352+config GRKERNSEC_SYSCTL_ON
60353+ bool "Turn on features by default"
60354+ depends on GRKERNSEC_SYSCTL
60355+ help
60356+ If you say Y here, instead of having all features enabled in the
60357+ kernel configuration disabled at boot time, the features will be
60358+ enabled at boot time. It is recommended you say Y here unless
60359+ there is some reason you would want all sysctl-tunable features to
60360+ be disabled by default. As mentioned elsewhere, it is important
60361+ to enable the grsec_lock entry once you have finished modifying
60362+ the sysctl entries.
60363+
60364+endmenu
60365+menu "Logging Options"
60366+depends on GRKERNSEC
60367+
60368+config GRKERNSEC_FLOODTIME
60369+ int "Seconds in between log messages (minimum)"
60370+ default 10
60371+ help
60372+ This option allows you to enforce the number of seconds between
60373+ grsecurity log messages. The default should be suitable for most
60374+	  people; however, if you choose to change it, choose a value small enough
60375+ to allow informative logs to be produced, but large enough to
60376+ prevent flooding.
60377+
60378+config GRKERNSEC_FLOODBURST
60379+ int "Number of messages in a burst (maximum)"
60380+ default 6
60381+ help
60382+ This option allows you to choose the maximum number of messages allowed
60383+ within the flood time interval you chose in a separate option. The
60384+	  default should be suitable for most people; however, if you find that
60385+ many of your logs are being interpreted as flooding, you may want to
60386+ raise this value.
60387+
60388+endmenu
60389+
60390+endmenu
60391diff -urNp linux-2.6.32.48/grsecurity/Makefile linux-2.6.32.48/grsecurity/Makefile
60392--- linux-2.6.32.48/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
60393+++ linux-2.6.32.48/grsecurity/Makefile 2011-11-15 19:59:43.000000000 -0500
60394@@ -0,0 +1,36 @@
60395+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
60396+# during 2001-2009 it has been completely redesigned by Brad Spengler
60397+# into an RBAC system
60398+#
60399+# All code in this directory and various hooks inserted throughout the kernel
60400+# are copyright Brad Spengler - Open Source Security, Inc., and released
60401+# under the GPL v2 or higher
60402+
60403+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
60404+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
60405+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
60406+
60407+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
60408+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
60409+ gracl_learn.o grsec_log.o
60410+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
60411+
60412+ifdef CONFIG_NET
60413+obj-y += grsec_sock.o
60414+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
60415+endif
60416+
60417+ifndef CONFIG_GRKERNSEC
60418+obj-y += grsec_disabled.o
60419+endif
60420+
60421+ifdef CONFIG_GRKERNSEC_HIDESYM
60422+extra-y := grsec_hidesym.o
60423+$(obj)/grsec_hidesym.o:
60424+ @-chmod -f 500 /boot
60425+ @-chmod -f 500 /lib/modules
60426+ @-chmod -f 500 /lib64/modules
60427+ @-chmod -f 500 /lib32/modules
60428+ @-chmod -f 700 .
60429+ @echo ' grsec: protected kernel image paths'
60430+endif
60431diff -urNp linux-2.6.32.48/include/acpi/acpi_bus.h linux-2.6.32.48/include/acpi/acpi_bus.h
60432--- linux-2.6.32.48/include/acpi/acpi_bus.h 2011-11-08 19:02:43.000000000 -0500
60433+++ linux-2.6.32.48/include/acpi/acpi_bus.h 2011-11-15 19:59:43.000000000 -0500
60434@@ -107,7 +107,7 @@ struct acpi_device_ops {
60435 acpi_op_bind bind;
60436 acpi_op_unbind unbind;
60437 acpi_op_notify notify;
60438-};
60439+} __no_const;
60440
60441 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60442
60443diff -urNp linux-2.6.32.48/include/acpi/acpi_drivers.h linux-2.6.32.48/include/acpi/acpi_drivers.h
60444--- linux-2.6.32.48/include/acpi/acpi_drivers.h 2011-11-08 19:02:43.000000000 -0500
60445+++ linux-2.6.32.48/include/acpi/acpi_drivers.h 2011-11-15 19:59:43.000000000 -0500
60446@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
60447 Dock Station
60448 -------------------------------------------------------------------------- */
60449 struct acpi_dock_ops {
60450- acpi_notify_handler handler;
60451- acpi_notify_handler uevent;
60452+ const acpi_notify_handler handler;
60453+ const acpi_notify_handler uevent;
60454 };
60455
60456 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
60457@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
60458 extern int register_dock_notifier(struct notifier_block *nb);
60459 extern void unregister_dock_notifier(struct notifier_block *nb);
60460 extern int register_hotplug_dock_device(acpi_handle handle,
60461- struct acpi_dock_ops *ops,
60462+ const struct acpi_dock_ops *ops,
60463 void *context);
60464 extern void unregister_hotplug_dock_device(acpi_handle handle);
60465 #else
60466@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
60467 {
60468 }
60469 static inline int register_hotplug_dock_device(acpi_handle handle,
60470- struct acpi_dock_ops *ops,
60471+ const struct acpi_dock_ops *ops,
60472 void *context)
60473 {
60474 return -ENODEV;
60475diff -urNp linux-2.6.32.48/include/asm-generic/atomic-long.h linux-2.6.32.48/include/asm-generic/atomic-long.h
60476--- linux-2.6.32.48/include/asm-generic/atomic-long.h 2011-11-08 19:02:43.000000000 -0500
60477+++ linux-2.6.32.48/include/asm-generic/atomic-long.h 2011-11-15 19:59:43.000000000 -0500
60478@@ -22,6 +22,12 @@
60479
60480 typedef atomic64_t atomic_long_t;
60481
60482+#ifdef CONFIG_PAX_REFCOUNT
60483+typedef atomic64_unchecked_t atomic_long_unchecked_t;
60484+#else
60485+typedef atomic64_t atomic_long_unchecked_t;
60486+#endif
60487+
60488 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60489
60490 static inline long atomic_long_read(atomic_long_t *l)
60491@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
60492 return (long)atomic64_read(v);
60493 }
60494
60495+#ifdef CONFIG_PAX_REFCOUNT
60496+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60497+{
60498+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60499+
60500+ return (long)atomic64_read_unchecked(v);
60501+}
60502+#endif
60503+
60504 static inline void atomic_long_set(atomic_long_t *l, long i)
60505 {
60506 atomic64_t *v = (atomic64_t *)l;
60507@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
60508 atomic64_set(v, i);
60509 }
60510
60511+#ifdef CONFIG_PAX_REFCOUNT
60512+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60513+{
60514+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60515+
60516+ atomic64_set_unchecked(v, i);
60517+}
60518+#endif
60519+
60520 static inline void atomic_long_inc(atomic_long_t *l)
60521 {
60522 atomic64_t *v = (atomic64_t *)l;
60523@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
60524 atomic64_inc(v);
60525 }
60526
60527+#ifdef CONFIG_PAX_REFCOUNT
60528+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60529+{
60530+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60531+
60532+ atomic64_inc_unchecked(v);
60533+}
60534+#endif
60535+
60536 static inline void atomic_long_dec(atomic_long_t *l)
60537 {
60538 atomic64_t *v = (atomic64_t *)l;
60539@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
60540 atomic64_dec(v);
60541 }
60542
60543+#ifdef CONFIG_PAX_REFCOUNT
60544+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60545+{
60546+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60547+
60548+ atomic64_dec_unchecked(v);
60549+}
60550+#endif
60551+
60552 static inline void atomic_long_add(long i, atomic_long_t *l)
60553 {
60554 atomic64_t *v = (atomic64_t *)l;
60555@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
60556 atomic64_add(i, v);
60557 }
60558
60559+#ifdef CONFIG_PAX_REFCOUNT
60560+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60561+{
60562+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60563+
60564+ atomic64_add_unchecked(i, v);
60565+}
60566+#endif
60567+
60568 static inline void atomic_long_sub(long i, atomic_long_t *l)
60569 {
60570 atomic64_t *v = (atomic64_t *)l;
60571@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
60572 return (long)atomic64_inc_return(v);
60573 }
60574
60575+#ifdef CONFIG_PAX_REFCOUNT
60576+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60577+{
60578+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60579+
60580+ return (long)atomic64_inc_return_unchecked(v);
60581+}
60582+#endif
60583+
60584 static inline long atomic_long_dec_return(atomic_long_t *l)
60585 {
60586 atomic64_t *v = (atomic64_t *)l;
60587@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
60588
60589 typedef atomic_t atomic_long_t;
60590
60591+#ifdef CONFIG_PAX_REFCOUNT
60592+typedef atomic_unchecked_t atomic_long_unchecked_t;
60593+#else
60594+typedef atomic_t atomic_long_unchecked_t;
60595+#endif
60596+
60597 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60598 static inline long atomic_long_read(atomic_long_t *l)
60599 {
60600@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
60601 return (long)atomic_read(v);
60602 }
60603
60604+#ifdef CONFIG_PAX_REFCOUNT
60605+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60606+{
60607+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60608+
60609+ return (long)atomic_read_unchecked(v);
60610+}
60611+#endif
60612+
60613 static inline void atomic_long_set(atomic_long_t *l, long i)
60614 {
60615 atomic_t *v = (atomic_t *)l;
60616@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
60617 atomic_set(v, i);
60618 }
60619
60620+#ifdef CONFIG_PAX_REFCOUNT
60621+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60622+{
60623+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60624+
60625+ atomic_set_unchecked(v, i);
60626+}
60627+#endif
60628+
60629 static inline void atomic_long_inc(atomic_long_t *l)
60630 {
60631 atomic_t *v = (atomic_t *)l;
60632@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
60633 atomic_inc(v);
60634 }
60635
60636+#ifdef CONFIG_PAX_REFCOUNT
60637+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60638+{
60639+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60640+
60641+ atomic_inc_unchecked(v);
60642+}
60643+#endif
60644+
60645 static inline void atomic_long_dec(atomic_long_t *l)
60646 {
60647 atomic_t *v = (atomic_t *)l;
60648@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
60649 atomic_dec(v);
60650 }
60651
60652+#ifdef CONFIG_PAX_REFCOUNT
60653+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60654+{
60655+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60656+
60657+ atomic_dec_unchecked(v);
60658+}
60659+#endif
60660+
60661 static inline void atomic_long_add(long i, atomic_long_t *l)
60662 {
60663 atomic_t *v = (atomic_t *)l;
60664@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
60665 atomic_add(i, v);
60666 }
60667
60668+#ifdef CONFIG_PAX_REFCOUNT
60669+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60670+{
60671+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60672+
60673+ atomic_add_unchecked(i, v);
60674+}
60675+#endif
60676+
60677 static inline void atomic_long_sub(long i, atomic_long_t *l)
60678 {
60679 atomic_t *v = (atomic_t *)l;
60680@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
60681 return (long)atomic_inc_return(v);
60682 }
60683
60684+#ifdef CONFIG_PAX_REFCOUNT
60685+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60686+{
60687+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60688+
60689+ return (long)atomic_inc_return_unchecked(v);
60690+}
60691+#endif
60692+
60693 static inline long atomic_long_dec_return(atomic_long_t *l)
60694 {
60695 atomic_t *v = (atomic_t *)l;
60696@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
60697
60698 #endif /* BITS_PER_LONG == 64 */
60699
60700+#ifdef CONFIG_PAX_REFCOUNT
60701+static inline void pax_refcount_needs_these_functions(void)
60702+{
60703+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
60704+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60705+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60706+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60707+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60708+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60709+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60710+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60711+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60712+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60713+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60714+
60715+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60716+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60717+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60718+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60719+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60720+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60721+}
60722+#else
60723+#define atomic_read_unchecked(v) atomic_read(v)
60724+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60725+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60726+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60727+#define atomic_inc_unchecked(v) atomic_inc(v)
60728+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60729+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60730+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60731+#define atomic_dec_unchecked(v) atomic_dec(v)
60732+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60733+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60734+
60735+#define atomic_long_read_unchecked(v) atomic_long_read(v)
60736+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60737+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60738+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60739+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60740+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60741+#endif
60742+
60743 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60744diff -urNp linux-2.6.32.48/include/asm-generic/bug.h linux-2.6.32.48/include/asm-generic/bug.h
60745--- linux-2.6.32.48/include/asm-generic/bug.h 2011-11-08 19:02:43.000000000 -0500
60746+++ linux-2.6.32.48/include/asm-generic/bug.h 2011-11-15 19:59:43.000000000 -0500
60747@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
60748
60749 #else /* !CONFIG_BUG */
60750 #ifndef HAVE_ARCH_BUG
60751-#define BUG() do {} while(0)
60752+#define BUG() do { for (;;) ; } while(0)
60753 #endif
60754
60755 #ifndef HAVE_ARCH_BUG_ON
60756-#define BUG_ON(condition) do { if (condition) ; } while(0)
60757+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
60758 #endif
60759
60760 #ifndef HAVE_ARCH_WARN_ON
60761diff -urNp linux-2.6.32.48/include/asm-generic/cache.h linux-2.6.32.48/include/asm-generic/cache.h
60762--- linux-2.6.32.48/include/asm-generic/cache.h 2011-11-08 19:02:43.000000000 -0500
60763+++ linux-2.6.32.48/include/asm-generic/cache.h 2011-11-15 19:59:43.000000000 -0500
60764@@ -6,7 +6,7 @@
60765 * cache lines need to provide their own cache.h.
60766 */
60767
60768-#define L1_CACHE_SHIFT 5
60769-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60770+#define L1_CACHE_SHIFT 5UL
60771+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60772
60773 #endif /* __ASM_GENERIC_CACHE_H */
60774diff -urNp linux-2.6.32.48/include/asm-generic/dma-mapping-common.h linux-2.6.32.48/include/asm-generic/dma-mapping-common.h
60775--- linux-2.6.32.48/include/asm-generic/dma-mapping-common.h 2011-11-08 19:02:43.000000000 -0500
60776+++ linux-2.6.32.48/include/asm-generic/dma-mapping-common.h 2011-11-15 19:59:43.000000000 -0500
60777@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
60778 enum dma_data_direction dir,
60779 struct dma_attrs *attrs)
60780 {
60781- struct dma_map_ops *ops = get_dma_ops(dev);
60782+ const struct dma_map_ops *ops = get_dma_ops(dev);
60783 dma_addr_t addr;
60784
60785 kmemcheck_mark_initialized(ptr, size);
60786@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
60787 enum dma_data_direction dir,
60788 struct dma_attrs *attrs)
60789 {
60790- struct dma_map_ops *ops = get_dma_ops(dev);
60791+ const struct dma_map_ops *ops = get_dma_ops(dev);
60792
60793 BUG_ON(!valid_dma_direction(dir));
60794 if (ops->unmap_page)
60795@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
60796 int nents, enum dma_data_direction dir,
60797 struct dma_attrs *attrs)
60798 {
60799- struct dma_map_ops *ops = get_dma_ops(dev);
60800+ const struct dma_map_ops *ops = get_dma_ops(dev);
60801 int i, ents;
60802 struct scatterlist *s;
60803
60804@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
60805 int nents, enum dma_data_direction dir,
60806 struct dma_attrs *attrs)
60807 {
60808- struct dma_map_ops *ops = get_dma_ops(dev);
60809+ const struct dma_map_ops *ops = get_dma_ops(dev);
60810
60811 BUG_ON(!valid_dma_direction(dir));
60812 debug_dma_unmap_sg(dev, sg, nents, dir);
60813@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
60814 size_t offset, size_t size,
60815 enum dma_data_direction dir)
60816 {
60817- struct dma_map_ops *ops = get_dma_ops(dev);
60818+ const struct dma_map_ops *ops = get_dma_ops(dev);
60819 dma_addr_t addr;
60820
60821 kmemcheck_mark_initialized(page_address(page) + offset, size);
60822@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
60823 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
60824 size_t size, enum dma_data_direction dir)
60825 {
60826- struct dma_map_ops *ops = get_dma_ops(dev);
60827+ const struct dma_map_ops *ops = get_dma_ops(dev);
60828
60829 BUG_ON(!valid_dma_direction(dir));
60830 if (ops->unmap_page)
60831@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
60832 size_t size,
60833 enum dma_data_direction dir)
60834 {
60835- struct dma_map_ops *ops = get_dma_ops(dev);
60836+ const struct dma_map_ops *ops = get_dma_ops(dev);
60837
60838 BUG_ON(!valid_dma_direction(dir));
60839 if (ops->sync_single_for_cpu)
60840@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
60841 dma_addr_t addr, size_t size,
60842 enum dma_data_direction dir)
60843 {
60844- struct dma_map_ops *ops = get_dma_ops(dev);
60845+ const struct dma_map_ops *ops = get_dma_ops(dev);
60846
60847 BUG_ON(!valid_dma_direction(dir));
60848 if (ops->sync_single_for_device)
60849@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
60850 size_t size,
60851 enum dma_data_direction dir)
60852 {
60853- struct dma_map_ops *ops = get_dma_ops(dev);
60854+ const struct dma_map_ops *ops = get_dma_ops(dev);
60855
60856 BUG_ON(!valid_dma_direction(dir));
60857 if (ops->sync_single_range_for_cpu) {
60858@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
60859 size_t size,
60860 enum dma_data_direction dir)
60861 {
60862- struct dma_map_ops *ops = get_dma_ops(dev);
60863+ const struct dma_map_ops *ops = get_dma_ops(dev);
60864
60865 BUG_ON(!valid_dma_direction(dir));
60866 if (ops->sync_single_range_for_device) {
60867@@ -155,7 +155,7 @@ static inline void
60868 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
60869 int nelems, enum dma_data_direction dir)
60870 {
60871- struct dma_map_ops *ops = get_dma_ops(dev);
60872+ const struct dma_map_ops *ops = get_dma_ops(dev);
60873
60874 BUG_ON(!valid_dma_direction(dir));
60875 if (ops->sync_sg_for_cpu)
60876@@ -167,7 +167,7 @@ static inline void
60877 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
60878 int nelems, enum dma_data_direction dir)
60879 {
60880- struct dma_map_ops *ops = get_dma_ops(dev);
60881+ const struct dma_map_ops *ops = get_dma_ops(dev);
60882
60883 BUG_ON(!valid_dma_direction(dir));
60884 if (ops->sync_sg_for_device)
60885diff -urNp linux-2.6.32.48/include/asm-generic/emergency-restart.h linux-2.6.32.48/include/asm-generic/emergency-restart.h
60886--- linux-2.6.32.48/include/asm-generic/emergency-restart.h 2011-11-08 19:02:43.000000000 -0500
60887+++ linux-2.6.32.48/include/asm-generic/emergency-restart.h 2011-11-15 19:59:43.000000000 -0500
60888@@ -1,7 +1,7 @@
60889 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60890 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60891
60892-static inline void machine_emergency_restart(void)
60893+static inline __noreturn void machine_emergency_restart(void)
60894 {
60895 machine_restart(NULL);
60896 }
60897diff -urNp linux-2.6.32.48/include/asm-generic/futex.h linux-2.6.32.48/include/asm-generic/futex.h
60898--- linux-2.6.32.48/include/asm-generic/futex.h 2011-11-08 19:02:43.000000000 -0500
60899+++ linux-2.6.32.48/include/asm-generic/futex.h 2011-11-15 19:59:43.000000000 -0500
60900@@ -6,7 +6,7 @@
60901 #include <asm/errno.h>
60902
60903 static inline int
60904-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
60905+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
60906 {
60907 int op = (encoded_op >> 28) & 7;
60908 int cmp = (encoded_op >> 24) & 15;
60909@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
60910 }
60911
60912 static inline int
60913-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
60914+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
60915 {
60916 return -ENOSYS;
60917 }
60918diff -urNp linux-2.6.32.48/include/asm-generic/int-l64.h linux-2.6.32.48/include/asm-generic/int-l64.h
60919--- linux-2.6.32.48/include/asm-generic/int-l64.h 2011-11-08 19:02:43.000000000 -0500
60920+++ linux-2.6.32.48/include/asm-generic/int-l64.h 2011-11-15 19:59:43.000000000 -0500
60921@@ -46,6 +46,8 @@ typedef unsigned int u32;
60922 typedef signed long s64;
60923 typedef unsigned long u64;
60924
60925+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
60926+
60927 #define S8_C(x) x
60928 #define U8_C(x) x ## U
60929 #define S16_C(x) x
60930diff -urNp linux-2.6.32.48/include/asm-generic/int-ll64.h linux-2.6.32.48/include/asm-generic/int-ll64.h
60931--- linux-2.6.32.48/include/asm-generic/int-ll64.h 2011-11-08 19:02:43.000000000 -0500
60932+++ linux-2.6.32.48/include/asm-generic/int-ll64.h 2011-11-15 19:59:43.000000000 -0500
60933@@ -51,6 +51,8 @@ typedef unsigned int u32;
60934 typedef signed long long s64;
60935 typedef unsigned long long u64;
60936
60937+typedef unsigned long long intoverflow_t;
60938+
60939 #define S8_C(x) x
60940 #define U8_C(x) x ## U
60941 #define S16_C(x) x
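The int-l64.h and int-ll64.h hunks both add an intoverflow_t wider than unsigned long (128-bit via mode(TI) on 64-bit targets, unsigned long long here), so size arithmetic can be range-checked before it wraps. A user-space sketch of that idiom for an ILP32 target; checked_alloc() is a hypothetical helper, not something this patch defines:

#include <stdlib.h>

typedef unsigned long long intoverflow_t;	/* wider than unsigned long on ILP32 */

static void *checked_alloc(unsigned long n, unsigned long size)
{
	/* compute the product in the wide type and reject it if it would
	 * not fit back into an unsigned long */
	if ((intoverflow_t)n * size > (unsigned long)-1)
		return NULL;
	return malloc((size_t)(n * size));
}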
60942diff -urNp linux-2.6.32.48/include/asm-generic/kmap_types.h linux-2.6.32.48/include/asm-generic/kmap_types.h
60943--- linux-2.6.32.48/include/asm-generic/kmap_types.h 2011-11-08 19:02:43.000000000 -0500
60944+++ linux-2.6.32.48/include/asm-generic/kmap_types.h 2011-11-15 19:59:43.000000000 -0500
60945@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
60946 KMAP_D(16) KM_IRQ_PTE,
60947 KMAP_D(17) KM_NMI,
60948 KMAP_D(18) KM_NMI_PTE,
60949-KMAP_D(19) KM_TYPE_NR
60950+KMAP_D(19) KM_CLEARPAGE,
60951+KMAP_D(20) KM_TYPE_NR
60952 };
60953
60954 #undef KMAP_D
60955diff -urNp linux-2.6.32.48/include/asm-generic/pgtable.h linux-2.6.32.48/include/asm-generic/pgtable.h
60956--- linux-2.6.32.48/include/asm-generic/pgtable.h 2011-11-08 19:02:43.000000000 -0500
60957+++ linux-2.6.32.48/include/asm-generic/pgtable.h 2011-11-15 19:59:43.000000000 -0500
60958@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
60959 unsigned long size);
60960 #endif
60961
60962+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60963+static inline unsigned long pax_open_kernel(void) { return 0; }
60964+#endif
60965+
60966+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60967+static inline unsigned long pax_close_kernel(void) { return 0; }
60968+#endif
60969+
60970 #endif /* !__ASSEMBLY__ */
60971
60972 #endif /* _ASM_GENERIC_PGTABLE_H */
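These two stubs give architectures without a PaX implementation a no-op pair, so common code can bracket writes to normally read-only kernel data unconditionally. The calling pattern used elsewhere in this patch is roughly the sketch below; some_readonly_table is hypothetical and __read_only comes from the cache.h hunk later in this patch:

static int some_readonly_table[16] __read_only;

static void update_entry(unsigned int idx, int val)
{
	pax_open_kernel();		/* lift write protection; no-op with the stubs above */
	some_readonly_table[idx] = val;
	pax_close_kernel();		/* restore write protection */
}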
60973diff -urNp linux-2.6.32.48/include/asm-generic/pgtable-nopmd.h linux-2.6.32.48/include/asm-generic/pgtable-nopmd.h
60974--- linux-2.6.32.48/include/asm-generic/pgtable-nopmd.h 2011-11-08 19:02:43.000000000 -0500
60975+++ linux-2.6.32.48/include/asm-generic/pgtable-nopmd.h 2011-11-15 19:59:43.000000000 -0500
60976@@ -1,14 +1,19 @@
60977 #ifndef _PGTABLE_NOPMD_H
60978 #define _PGTABLE_NOPMD_H
60979
60980-#ifndef __ASSEMBLY__
60981-
60982 #include <asm-generic/pgtable-nopud.h>
60983
60984-struct mm_struct;
60985-
60986 #define __PAGETABLE_PMD_FOLDED
60987
60988+#define PMD_SHIFT PUD_SHIFT
60989+#define PTRS_PER_PMD 1
60990+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60991+#define PMD_MASK (~(PMD_SIZE-1))
60992+
60993+#ifndef __ASSEMBLY__
60994+
60995+struct mm_struct;
60996+
60997 /*
60998 * Having the pmd type consist of a pud gets the size right, and allows
60999 * us to conceptually access the pud entry that this pmd is folded into
61000@@ -16,11 +21,6 @@ struct mm_struct;
61001 */
61002 typedef struct { pud_t pud; } pmd_t;
61003
61004-#define PMD_SHIFT PUD_SHIFT
61005-#define PTRS_PER_PMD 1
61006-#define PMD_SIZE (1UL << PMD_SHIFT)
61007-#define PMD_MASK (~(PMD_SIZE-1))
61008-
61009 /*
61010 * The "pud_xxx()" functions here are trivial for a folded two-level
61011 * setup: the pmd is never bad, and a pmd always exists (as it's folded
61012diff -urNp linux-2.6.32.48/include/asm-generic/pgtable-nopud.h linux-2.6.32.48/include/asm-generic/pgtable-nopud.h
61013--- linux-2.6.32.48/include/asm-generic/pgtable-nopud.h 2011-11-08 19:02:43.000000000 -0500
61014+++ linux-2.6.32.48/include/asm-generic/pgtable-nopud.h 2011-11-15 19:59:43.000000000 -0500
61015@@ -1,10 +1,15 @@
61016 #ifndef _PGTABLE_NOPUD_H
61017 #define _PGTABLE_NOPUD_H
61018
61019-#ifndef __ASSEMBLY__
61020-
61021 #define __PAGETABLE_PUD_FOLDED
61022
61023+#define PUD_SHIFT PGDIR_SHIFT
61024+#define PTRS_PER_PUD 1
61025+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
61026+#define PUD_MASK (~(PUD_SIZE-1))
61027+
61028+#ifndef __ASSEMBLY__
61029+
61030 /*
61031 * Having the pud type consist of a pgd gets the size right, and allows
61032 * us to conceptually access the pgd entry that this pud is folded into
61033@@ -12,11 +17,6 @@
61034 */
61035 typedef struct { pgd_t pgd; } pud_t;
61036
61037-#define PUD_SHIFT PGDIR_SHIFT
61038-#define PTRS_PER_PUD 1
61039-#define PUD_SIZE (1UL << PUD_SHIFT)
61040-#define PUD_MASK (~(PUD_SIZE-1))
61041-
61042 /*
61043 * The "pgd_xxx()" functions here are trivial for a folded two-level
61044 * setup: the pud is never bad, and a pud always exists (as it's folded
61045diff -urNp linux-2.6.32.48/include/asm-generic/vmlinux.lds.h linux-2.6.32.48/include/asm-generic/vmlinux.lds.h
61046--- linux-2.6.32.48/include/asm-generic/vmlinux.lds.h 2011-11-08 19:02:43.000000000 -0500
61047+++ linux-2.6.32.48/include/asm-generic/vmlinux.lds.h 2011-11-15 19:59:43.000000000 -0500
61048@@ -199,6 +199,7 @@
61049 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
61050 VMLINUX_SYMBOL(__start_rodata) = .; \
61051 *(.rodata) *(.rodata.*) \
61052+ *(.data.read_only) \
61053 *(__vermagic) /* Kernel version magic */ \
61054 *(__markers_strings) /* Markers: strings */ \
61055 *(__tracepoints_strings)/* Tracepoints: strings */ \
61056@@ -656,22 +657,24 @@
61057 * section in the linker script will go there too. @phdr should have
61058 * a leading colon.
61059 *
61060- * Note that this macros defines __per_cpu_load as an absolute symbol.
61061+ * Note that this macros defines per_cpu_load as an absolute symbol.
61062 * If there is no need to put the percpu section at a predetermined
61063 * address, use PERCPU().
61064 */
61065 #define PERCPU_VADDR(vaddr, phdr) \
61066- VMLINUX_SYMBOL(__per_cpu_load) = .; \
61067- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
61068+ per_cpu_load = .; \
61069+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
61070 - LOAD_OFFSET) { \
61071+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
61072 VMLINUX_SYMBOL(__per_cpu_start) = .; \
61073 *(.data.percpu.first) \
61074- *(.data.percpu.page_aligned) \
61075 *(.data.percpu) \
61076+ . = ALIGN(PAGE_SIZE); \
61077+ *(.data.percpu.page_aligned) \
61078 *(.data.percpu.shared_aligned) \
61079 VMLINUX_SYMBOL(__per_cpu_end) = .; \
61080 } phdr \
61081- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
61082+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
61083
61084 /**
61085 * PERCPU - define output section for percpu area, simple version
61086diff -urNp linux-2.6.32.48/include/drm/drm_crtc_helper.h linux-2.6.32.48/include/drm/drm_crtc_helper.h
61087--- linux-2.6.32.48/include/drm/drm_crtc_helper.h 2011-11-08 19:02:43.000000000 -0500
61088+++ linux-2.6.32.48/include/drm/drm_crtc_helper.h 2011-11-15 19:59:43.000000000 -0500
61089@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
61090
61091 /* reload the current crtc LUT */
61092 void (*load_lut)(struct drm_crtc *crtc);
61093-};
61094+} __no_const;
61095
61096 struct drm_encoder_helper_funcs {
61097 void (*dpms)(struct drm_encoder *encoder, int mode);
61098@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
61099 struct drm_connector *connector);
61100 /* disable encoder when not in use - more explicit than dpms off */
61101 void (*disable)(struct drm_encoder *encoder);
61102-};
61103+} __no_const;
61104
61105 struct drm_connector_helper_funcs {
61106 int (*get_modes)(struct drm_connector *connector);
61107diff -urNp linux-2.6.32.48/include/drm/drmP.h linux-2.6.32.48/include/drm/drmP.h
61108--- linux-2.6.32.48/include/drm/drmP.h 2011-11-08 19:02:43.000000000 -0500
61109+++ linux-2.6.32.48/include/drm/drmP.h 2011-11-15 19:59:43.000000000 -0500
61110@@ -71,6 +71,7 @@
61111 #include <linux/workqueue.h>
61112 #include <linux/poll.h>
61113 #include <asm/pgalloc.h>
61114+#include <asm/local.h>
61115 #include "drm.h"
61116
61117 #include <linux/idr.h>
61118@@ -814,7 +815,7 @@ struct drm_driver {
61119 void (*vgaarb_irq)(struct drm_device *dev, bool state);
61120
61121 /* Driver private ops for this object */
61122- struct vm_operations_struct *gem_vm_ops;
61123+ const struct vm_operations_struct *gem_vm_ops;
61124
61125 int major;
61126 int minor;
61127@@ -917,7 +918,7 @@ struct drm_device {
61128
61129 /** \name Usage Counters */
61130 /*@{ */
61131- int open_count; /**< Outstanding files open */
61132+ local_t open_count; /**< Outstanding files open */
61133 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
61134 atomic_t vma_count; /**< Outstanding vma areas open */
61135 int buf_use; /**< Buffers in use -- cannot alloc */
61136@@ -928,7 +929,7 @@ struct drm_device {
61137 /*@{ */
61138 unsigned long counters;
61139 enum drm_stat_type types[15];
61140- atomic_t counts[15];
61141+ atomic_unchecked_t counts[15];
61142 /*@} */
61143
61144 struct list_head filelist;
61145@@ -1016,7 +1017,7 @@ struct drm_device {
61146 struct pci_controller *hose;
61147 #endif
61148 struct drm_sg_mem *sg; /**< Scatter gather memory */
61149- unsigned int num_crtcs; /**< Number of CRTCs on this device */
61150+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
61151 void *dev_private; /**< device private data */
61152 void *mm_private;
61153 struct address_space *dev_mapping;
61154@@ -1042,11 +1043,11 @@ struct drm_device {
61155 spinlock_t object_name_lock;
61156 struct idr object_name_idr;
61157 atomic_t object_count;
61158- atomic_t object_memory;
61159+ atomic_unchecked_t object_memory;
61160 atomic_t pin_count;
61161- atomic_t pin_memory;
61162+ atomic_unchecked_t pin_memory;
61163 atomic_t gtt_count;
61164- atomic_t gtt_memory;
61165+ atomic_unchecked_t gtt_memory;
61166 uint32_t gtt_total;
61167 uint32_t invalidate_domains; /* domains pending invalidation */
61168 uint32_t flush_domains; /* domains pending flush */
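open_count becomes a local_t (hence the new asm/local.h include at the top of the hunk), and the statistics counters move to atomic_unchecked_t so that intentional wraparound is not flagged by overflow checking. A hypothetical sketch of how a local_t counter is driven with the asm-generic local_* helpers; the surrounding open/release logic is assumed, not quoted from the patch:

#include <asm/local.h>

static local_t open_count;

static void device_opened(void)
{
	local_inc(&open_count);			/* cheap non-checked increment */
}

static int device_released(void)
{
	return local_dec_and_test(&open_count);	/* true when the last opener is gone */
}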
61169diff -urNp linux-2.6.32.48/include/drm/ttm/ttm_memory.h linux-2.6.32.48/include/drm/ttm/ttm_memory.h
61170--- linux-2.6.32.48/include/drm/ttm/ttm_memory.h 2011-11-08 19:02:43.000000000 -0500
61171+++ linux-2.6.32.48/include/drm/ttm/ttm_memory.h 2011-11-15 19:59:43.000000000 -0500
61172@@ -47,7 +47,7 @@
61173
61174 struct ttm_mem_shrink {
61175 int (*do_shrink) (struct ttm_mem_shrink *);
61176-};
61177+} __no_const;
61178
61179 /**
61180 * struct ttm_mem_global - Global memory accounting structure.
61181diff -urNp linux-2.6.32.48/include/linux/a.out.h linux-2.6.32.48/include/linux/a.out.h
61182--- linux-2.6.32.48/include/linux/a.out.h 2011-11-08 19:02:43.000000000 -0500
61183+++ linux-2.6.32.48/include/linux/a.out.h 2011-11-15 19:59:43.000000000 -0500
61184@@ -39,6 +39,14 @@ enum machine_type {
61185 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
61186 };
61187
61188+/* Constants for the N_FLAGS field */
61189+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61190+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
61191+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
61192+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
61193+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61194+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61195+
61196 #if !defined (N_MAGIC)
61197 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
61198 #endif
61199diff -urNp linux-2.6.32.48/include/linux/atmdev.h linux-2.6.32.48/include/linux/atmdev.h
61200--- linux-2.6.32.48/include/linux/atmdev.h 2011-11-08 19:02:43.000000000 -0500
61201+++ linux-2.6.32.48/include/linux/atmdev.h 2011-11-15 19:59:43.000000000 -0500
61202@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
61203 #endif
61204
61205 struct k_atm_aal_stats {
61206-#define __HANDLE_ITEM(i) atomic_t i
61207+#define __HANDLE_ITEM(i) atomic_unchecked_t i
61208 __AAL_STAT_ITEMS
61209 #undef __HANDLE_ITEM
61210 };
61211diff -urNp linux-2.6.32.48/include/linux/backlight.h linux-2.6.32.48/include/linux/backlight.h
61212--- linux-2.6.32.48/include/linux/backlight.h 2011-11-08 19:02:43.000000000 -0500
61213+++ linux-2.6.32.48/include/linux/backlight.h 2011-11-15 19:59:43.000000000 -0500
61214@@ -36,18 +36,18 @@ struct backlight_device;
61215 struct fb_info;
61216
61217 struct backlight_ops {
61218- unsigned int options;
61219+ const unsigned int options;
61220
61221 #define BL_CORE_SUSPENDRESUME (1 << 0)
61222
61223 /* Notify the backlight driver some property has changed */
61224- int (*update_status)(struct backlight_device *);
61225+ int (* const update_status)(struct backlight_device *);
61226 /* Return the current backlight brightness (accounting for power,
61227 fb_blank etc.) */
61228- int (*get_brightness)(struct backlight_device *);
61229+ int (* const get_brightness)(struct backlight_device *);
61230 /* Check if given framebuffer device is the one bound to this backlight;
61231 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
61232- int (*check_fb)(struct fb_info *);
61233+ int (* const check_fb)(struct fb_info *);
61234 };
61235
61236 /* This structure defines all the properties of a backlight */
61237@@ -86,7 +86,7 @@ struct backlight_device {
61238 registered this device has been unloaded, and if class_get_devdata()
61239 points to something in the body of that driver, it is also invalid. */
61240 struct mutex ops_lock;
61241- struct backlight_ops *ops;
61242+ const struct backlight_ops *ops;
61243
61244 /* The framebuffer notifier block */
61245 struct notifier_block fb_notif;
61246@@ -103,7 +103,7 @@ static inline void backlight_update_stat
61247 }
61248
61249 extern struct backlight_device *backlight_device_register(const char *name,
61250- struct device *dev, void *devdata, struct backlight_ops *ops);
61251+ struct device *dev, void *devdata, const struct backlight_ops *ops);
61252 extern void backlight_device_unregister(struct backlight_device *bd);
61253 extern void backlight_force_update(struct backlight_device *bd,
61254 enum backlight_update_reason reason);
61255diff -urNp linux-2.6.32.48/include/linux/binfmts.h linux-2.6.32.48/include/linux/binfmts.h
61256--- linux-2.6.32.48/include/linux/binfmts.h 2011-11-08 19:02:43.000000000 -0500
61257+++ linux-2.6.32.48/include/linux/binfmts.h 2011-11-15 19:59:43.000000000 -0500
61258@@ -83,6 +83,7 @@ struct linux_binfmt {
61259 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
61260 int (*load_shlib)(struct file *);
61261 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
61262+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
61263 unsigned long min_coredump; /* minimal dump size */
61264 int hasvdso;
61265 };
61266diff -urNp linux-2.6.32.48/include/linux/blkdev.h linux-2.6.32.48/include/linux/blkdev.h
61267--- linux-2.6.32.48/include/linux/blkdev.h 2011-11-08 19:02:43.000000000 -0500
61268+++ linux-2.6.32.48/include/linux/blkdev.h 2011-11-15 19:59:43.000000000 -0500
61269@@ -1278,7 +1278,7 @@ struct block_device_operations {
61270 int (*revalidate_disk) (struct gendisk *);
61271 int (*getgeo)(struct block_device *, struct hd_geometry *);
61272 struct module *owner;
61273-};
61274+} __do_const;
61275
61276 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
61277 unsigned long);
61278diff -urNp linux-2.6.32.48/include/linux/blktrace_api.h linux-2.6.32.48/include/linux/blktrace_api.h
61279--- linux-2.6.32.48/include/linux/blktrace_api.h 2011-11-08 19:02:43.000000000 -0500
61280+++ linux-2.6.32.48/include/linux/blktrace_api.h 2011-11-15 19:59:43.000000000 -0500
61281@@ -160,7 +160,7 @@ struct blk_trace {
61282 struct dentry *dir;
61283 struct dentry *dropped_file;
61284 struct dentry *msg_file;
61285- atomic_t dropped;
61286+ atomic_unchecked_t dropped;
61287 };
61288
61289 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
61290diff -urNp linux-2.6.32.48/include/linux/byteorder/little_endian.h linux-2.6.32.48/include/linux/byteorder/little_endian.h
61291--- linux-2.6.32.48/include/linux/byteorder/little_endian.h 2011-11-08 19:02:43.000000000 -0500
61292+++ linux-2.6.32.48/include/linux/byteorder/little_endian.h 2011-11-15 19:59:43.000000000 -0500
61293@@ -42,51 +42,51 @@
61294
61295 static inline __le64 __cpu_to_le64p(const __u64 *p)
61296 {
61297- return (__force __le64)*p;
61298+ return (__force const __le64)*p;
61299 }
61300 static inline __u64 __le64_to_cpup(const __le64 *p)
61301 {
61302- return (__force __u64)*p;
61303+ return (__force const __u64)*p;
61304 }
61305 static inline __le32 __cpu_to_le32p(const __u32 *p)
61306 {
61307- return (__force __le32)*p;
61308+ return (__force const __le32)*p;
61309 }
61310 static inline __u32 __le32_to_cpup(const __le32 *p)
61311 {
61312- return (__force __u32)*p;
61313+ return (__force const __u32)*p;
61314 }
61315 static inline __le16 __cpu_to_le16p(const __u16 *p)
61316 {
61317- return (__force __le16)*p;
61318+ return (__force const __le16)*p;
61319 }
61320 static inline __u16 __le16_to_cpup(const __le16 *p)
61321 {
61322- return (__force __u16)*p;
61323+ return (__force const __u16)*p;
61324 }
61325 static inline __be64 __cpu_to_be64p(const __u64 *p)
61326 {
61327- return (__force __be64)__swab64p(p);
61328+ return (__force const __be64)__swab64p(p);
61329 }
61330 static inline __u64 __be64_to_cpup(const __be64 *p)
61331 {
61332- return __swab64p((__u64 *)p);
61333+ return __swab64p((const __u64 *)p);
61334 }
61335 static inline __be32 __cpu_to_be32p(const __u32 *p)
61336 {
61337- return (__force __be32)__swab32p(p);
61338+ return (__force const __be32)__swab32p(p);
61339 }
61340 static inline __u32 __be32_to_cpup(const __be32 *p)
61341 {
61342- return __swab32p((__u32 *)p);
61343+ return __swab32p((const __u32 *)p);
61344 }
61345 static inline __be16 __cpu_to_be16p(const __u16 *p)
61346 {
61347- return (__force __be16)__swab16p(p);
61348+ return (__force const __be16)__swab16p(p);
61349 }
61350 static inline __u16 __be16_to_cpup(const __be16 *p)
61351 {
61352- return __swab16p((__u16 *)p);
61353+ return __swab16p((const __u16 *)p);
61354 }
61355 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
61356 #define __le64_to_cpus(x) do { (void)(x); } while (0)
61357diff -urNp linux-2.6.32.48/include/linux/cache.h linux-2.6.32.48/include/linux/cache.h
61358--- linux-2.6.32.48/include/linux/cache.h 2011-11-08 19:02:43.000000000 -0500
61359+++ linux-2.6.32.48/include/linux/cache.h 2011-11-15 19:59:43.000000000 -0500
61360@@ -16,6 +16,10 @@
61361 #define __read_mostly
61362 #endif
61363
61364+#ifndef __read_only
61365+#define __read_only __read_mostly
61366+#endif
61367+
61368 #ifndef ____cacheline_aligned
61369 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
61370 #endif
61371diff -urNp linux-2.6.32.48/include/linux/capability.h linux-2.6.32.48/include/linux/capability.h
61372--- linux-2.6.32.48/include/linux/capability.h 2011-11-08 19:02:43.000000000 -0500
61373+++ linux-2.6.32.48/include/linux/capability.h 2011-11-15 19:59:43.000000000 -0500
61374@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
61375 (security_real_capable_noaudit((t), (cap)) == 0)
61376
61377 extern int capable(int cap);
61378+int capable_nolog(int cap);
61379
61380 /* audit system wants to get cap info from files as well */
61381 struct dentry;
61382diff -urNp linux-2.6.32.48/include/linux/compiler-gcc4.h linux-2.6.32.48/include/linux/compiler-gcc4.h
61383--- linux-2.6.32.48/include/linux/compiler-gcc4.h 2011-11-08 19:02:43.000000000 -0500
61384+++ linux-2.6.32.48/include/linux/compiler-gcc4.h 2011-11-15 19:59:43.000000000 -0500
61385@@ -36,4 +36,16 @@
61386 the kernel context */
61387 #define __cold __attribute__((__cold__))
61388
61389+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
61390+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
61391+#define __bos0(ptr) __bos((ptr), 0)
61392+#define __bos1(ptr) __bos((ptr), 1)
61393+
61394+#if __GNUC_MINOR__ >= 5
61395+#ifdef CONSTIFY_PLUGIN
61396+#define __no_const __attribute__((no_const))
61397+#define __do_const __attribute__((do_const))
61398+#endif
61399+#endif
61400+
61401 #endif
61402diff -urNp linux-2.6.32.48/include/linux/compiler.h linux-2.6.32.48/include/linux/compiler.h
61403--- linux-2.6.32.48/include/linux/compiler.h 2011-11-08 19:02:43.000000000 -0500
61404+++ linux-2.6.32.48/include/linux/compiler.h 2011-11-15 19:59:43.000000000 -0500
61405@@ -5,11 +5,14 @@
61406
61407 #ifdef __CHECKER__
61408 # define __user __attribute__((noderef, address_space(1)))
61409+# define __force_user __force __user
61410 # define __kernel /* default address space */
61411+# define __force_kernel __force __kernel
61412 # define __safe __attribute__((safe))
61413 # define __force __attribute__((force))
61414 # define __nocast __attribute__((nocast))
61415 # define __iomem __attribute__((noderef, address_space(2)))
61416+# define __force_iomem __force __iomem
61417 # define __acquires(x) __attribute__((context(x,0,1)))
61418 # define __releases(x) __attribute__((context(x,1,0)))
61419 # define __acquire(x) __context__(x,1)
61420@@ -17,13 +20,34 @@
61421 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61422 extern void __chk_user_ptr(const volatile void __user *);
61423 extern void __chk_io_ptr(const volatile void __iomem *);
61424+#elif defined(CHECKER_PLUGIN)
61425+//# define __user
61426+//# define __force_user
61427+//# define __kernel
61428+//# define __force_kernel
61429+# define __safe
61430+# define __force
61431+# define __nocast
61432+# define __iomem
61433+# define __force_iomem
61434+# define __chk_user_ptr(x) (void)0
61435+# define __chk_io_ptr(x) (void)0
61436+# define __builtin_warning(x, y...) (1)
61437+# define __acquires(x)
61438+# define __releases(x)
61439+# define __acquire(x) (void)0
61440+# define __release(x) (void)0
61441+# define __cond_lock(x,c) (c)
61442 #else
61443 # define __user
61444+# define __force_user
61445 # define __kernel
61446+# define __force_kernel
61447 # define __safe
61448 # define __force
61449 # define __nocast
61450 # define __iomem
61451+# define __force_iomem
61452 # define __chk_user_ptr(x) (void)0
61453 # define __chk_io_ptr(x) (void)0
61454 # define __builtin_warning(x, y...) (1)
61455@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_
61456 # define __attribute_const__ /* unimplemented */
61457 #endif
61458
61459+#ifndef __no_const
61460+# define __no_const
61461+#endif
61462+
61463+#ifndef __do_const
61464+# define __do_const
61465+#endif
61466+
61467 /*
61468 * Tell gcc if a function is cold. The compiler will assume any path
61469 * directly leading to the call is unlikely.
61470@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_
61471 #define __cold
61472 #endif
61473
61474+#ifndef __alloc_size
61475+#define __alloc_size(...)
61476+#endif
61477+
61478+#ifndef __bos
61479+#define __bos(ptr, arg)
61480+#endif
61481+
61482+#ifndef __bos0
61483+#define __bos0(ptr)
61484+#endif
61485+
61486+#ifndef __bos1
61487+#define __bos1(ptr)
61488+#endif
61489+
61490 /* Simple shorthand for a section definition */
61491 #ifndef __section
61492 # define __section(S) __attribute__ ((__section__(#S)))
61493@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
61494 * use is to mediate communication between process-level code and irq/NMI
61495 * handlers, all running on the same CPU.
61496 */
61497-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61498+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61499+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61500
61501 #endif /* __LINUX_COMPILER_H */
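With ACCESS_ONCE now going through a volatile const alias, plain loads behave as before, but any site that used the macro as an lvalue stops compiling and has to be converted to ACCESS_ONCE_RW; that is the point of the split. A small sketch of the distinction (flag is a made-up variable):

static int flag;

static void wait_for_flag(void)
{
	while (!ACCESS_ONCE(flag))	/* read-only access: unchanged */
		;
}

static void set_flag(void)
{
	ACCESS_ONCE_RW(flag) = 1;	/* writes must use the _RW variant */
	/* ACCESS_ONCE(flag) = 1; would now fail: assignment to a const lvalue */
}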
61502diff -urNp linux-2.6.32.48/include/linux/crypto.h linux-2.6.32.48/include/linux/crypto.h
61503--- linux-2.6.32.48/include/linux/crypto.h 2011-11-08 19:02:43.000000000 -0500
61504+++ linux-2.6.32.48/include/linux/crypto.h 2011-11-15 19:59:43.000000000 -0500
61505@@ -394,7 +394,7 @@ struct cipher_tfm {
61506 const u8 *key, unsigned int keylen);
61507 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61508 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61509-};
61510+} __no_const;
61511
61512 struct hash_tfm {
61513 int (*init)(struct hash_desc *desc);
61514@@ -415,13 +415,13 @@ struct compress_tfm {
61515 int (*cot_decompress)(struct crypto_tfm *tfm,
61516 const u8 *src, unsigned int slen,
61517 u8 *dst, unsigned int *dlen);
61518-};
61519+} __no_const;
61520
61521 struct rng_tfm {
61522 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61523 unsigned int dlen);
61524 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61525-};
61526+} __no_const;
61527
61528 #define crt_ablkcipher crt_u.ablkcipher
61529 #define crt_aead crt_u.aead
61530diff -urNp linux-2.6.32.48/include/linux/dcache.h linux-2.6.32.48/include/linux/dcache.h
61531--- linux-2.6.32.48/include/linux/dcache.h 2011-11-08 19:02:43.000000000 -0500
61532+++ linux-2.6.32.48/include/linux/dcache.h 2011-11-15 19:59:43.000000000 -0500
61533@@ -119,6 +119,8 @@ struct dentry {
61534 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
61535 };
61536
61537+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
61538+
61539 /*
61540 * dentry->d_lock spinlock nesting subclasses:
61541 *
61542diff -urNp linux-2.6.32.48/include/linux/decompress/mm.h linux-2.6.32.48/include/linux/decompress/mm.h
61543--- linux-2.6.32.48/include/linux/decompress/mm.h 2011-11-08 19:02:43.000000000 -0500
61544+++ linux-2.6.32.48/include/linux/decompress/mm.h 2011-11-15 19:59:43.000000000 -0500
61545@@ -78,7 +78,7 @@ static void free(void *where)
61546 * warnings when not needed (indeed large_malloc / large_free are not
61547 * needed by inflate */
61548
61549-#define malloc(a) kmalloc(a, GFP_KERNEL)
61550+#define malloc(a) kmalloc((a), GFP_KERNEL)
61551 #define free(a) kfree(a)
61552
61553 #define large_malloc(a) vmalloc(a)
61554diff -urNp linux-2.6.32.48/include/linux/dma-mapping.h linux-2.6.32.48/include/linux/dma-mapping.h
61555--- linux-2.6.32.48/include/linux/dma-mapping.h 2011-11-08 19:02:43.000000000 -0500
61556+++ linux-2.6.32.48/include/linux/dma-mapping.h 2011-11-15 19:59:43.000000000 -0500
61557@@ -16,51 +16,51 @@ enum dma_data_direction {
61558 };
61559
61560 struct dma_map_ops {
61561- void* (*alloc_coherent)(struct device *dev, size_t size,
61562+ void* (* const alloc_coherent)(struct device *dev, size_t size,
61563 dma_addr_t *dma_handle, gfp_t gfp);
61564- void (*free_coherent)(struct device *dev, size_t size,
61565+ void (* const free_coherent)(struct device *dev, size_t size,
61566 void *vaddr, dma_addr_t dma_handle);
61567- dma_addr_t (*map_page)(struct device *dev, struct page *page,
61568+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
61569 unsigned long offset, size_t size,
61570 enum dma_data_direction dir,
61571 struct dma_attrs *attrs);
61572- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
61573+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
61574 size_t size, enum dma_data_direction dir,
61575 struct dma_attrs *attrs);
61576- int (*map_sg)(struct device *dev, struct scatterlist *sg,
61577+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
61578 int nents, enum dma_data_direction dir,
61579 struct dma_attrs *attrs);
61580- void (*unmap_sg)(struct device *dev,
61581+ void (* const unmap_sg)(struct device *dev,
61582 struct scatterlist *sg, int nents,
61583 enum dma_data_direction dir,
61584 struct dma_attrs *attrs);
61585- void (*sync_single_for_cpu)(struct device *dev,
61586+ void (* const sync_single_for_cpu)(struct device *dev,
61587 dma_addr_t dma_handle, size_t size,
61588 enum dma_data_direction dir);
61589- void (*sync_single_for_device)(struct device *dev,
61590+ void (* const sync_single_for_device)(struct device *dev,
61591 dma_addr_t dma_handle, size_t size,
61592 enum dma_data_direction dir);
61593- void (*sync_single_range_for_cpu)(struct device *dev,
61594+ void (* const sync_single_range_for_cpu)(struct device *dev,
61595 dma_addr_t dma_handle,
61596 unsigned long offset,
61597 size_t size,
61598 enum dma_data_direction dir);
61599- void (*sync_single_range_for_device)(struct device *dev,
61600+ void (* const sync_single_range_for_device)(struct device *dev,
61601 dma_addr_t dma_handle,
61602 unsigned long offset,
61603 size_t size,
61604 enum dma_data_direction dir);
61605- void (*sync_sg_for_cpu)(struct device *dev,
61606+ void (* const sync_sg_for_cpu)(struct device *dev,
61607 struct scatterlist *sg, int nents,
61608 enum dma_data_direction dir);
61609- void (*sync_sg_for_device)(struct device *dev,
61610+ void (* const sync_sg_for_device)(struct device *dev,
61611 struct scatterlist *sg, int nents,
61612 enum dma_data_direction dir);
61613- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
61614- int (*dma_supported)(struct device *dev, u64 mask);
61615+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
61616+ int (* const dma_supported)(struct device *dev, u64 mask);
61617 int (*set_dma_mask)(struct device *dev, u64 mask);
61618 int is_phys;
61619-};
61620+} __do_const;
61621
61622 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61623
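With every handler pointer const-qualified and the struct tagged __do_const, a dma_map_ops instance can only be filled in by its static initializer and becomes a candidate for a read-only section; set_dma_mask is the one member left writable. A hedged sketch of what a conforming definition looks like (the nommu_* helpers are hypothetical):

static void *nommu_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
static void nommu_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t handle);

static const struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent	= nommu_alloc_coherent,	/* set once, at build time */
	.free_coherent	= nommu_free_coherent,
	.is_phys	= 1,
};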
61624diff -urNp linux-2.6.32.48/include/linux/dst.h linux-2.6.32.48/include/linux/dst.h
61625--- linux-2.6.32.48/include/linux/dst.h 2011-11-08 19:02:43.000000000 -0500
61626+++ linux-2.6.32.48/include/linux/dst.h 2011-11-15 19:59:43.000000000 -0500
61627@@ -380,7 +380,7 @@ struct dst_node
61628 struct thread_pool *pool;
61629
61630 /* Transaction IDs live here */
61631- atomic_long_t gen;
61632+ atomic_long_unchecked_t gen;
61633
61634 /*
61635 * How frequently and how many times transaction
61636diff -urNp linux-2.6.32.48/include/linux/elf.h linux-2.6.32.48/include/linux/elf.h
61637--- linux-2.6.32.48/include/linux/elf.h 2011-11-08 19:02:43.000000000 -0500
61638+++ linux-2.6.32.48/include/linux/elf.h 2011-11-15 19:59:43.000000000 -0500
61639@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
61640 #define PT_GNU_EH_FRAME 0x6474e550
61641
61642 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61643+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61644+
61645+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61646+
61647+/* Constants for the e_flags field */
61648+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61649+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61650+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61651+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61652+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61653+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61654
61655 /* These constants define the different elf file types */
61656 #define ET_NONE 0
61657@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
61658 #define DT_DEBUG 21
61659 #define DT_TEXTREL 22
61660 #define DT_JMPREL 23
61661+#define DT_FLAGS 30
61662+ #define DF_TEXTREL 0x00000004
61663 #define DT_ENCODING 32
61664 #define OLD_DT_LOOS 0x60000000
61665 #define DT_LOOS 0x6000000d
61666@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
61667 #define PF_W 0x2
61668 #define PF_X 0x1
61669
61670+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61671+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61672+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61673+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61674+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61675+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61676+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61677+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61678+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61679+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61680+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61681+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61682+
61683 typedef struct elf32_phdr{
61684 Elf32_Word p_type;
61685 Elf32_Off p_offset;
61686@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
61687 #define EI_OSABI 7
61688 #define EI_PAD 8
61689
61690+#define EI_PAX 14
61691+
61692 #define ELFMAG0 0x7f /* EI_MAG */
61693 #define ELFMAG1 'E'
61694 #define ELFMAG2 'L'
61695@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
61696 #define elf_phdr elf32_phdr
61697 #define elf_note elf32_note
61698 #define elf_addr_t Elf32_Off
61699+#define elf_dyn Elf32_Dyn
61700
61701 #else
61702
61703@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
61704 #define elf_phdr elf64_phdr
61705 #define elf_note elf64_note
61706 #define elf_addr_t Elf64_Off
61707+#define elf_dyn Elf64_Dyn
61708
61709 #endif
61710
61711diff -urNp linux-2.6.32.48/include/linux/fscache-cache.h linux-2.6.32.48/include/linux/fscache-cache.h
61712--- linux-2.6.32.48/include/linux/fscache-cache.h 2011-11-08 19:02:43.000000000 -0500
61713+++ linux-2.6.32.48/include/linux/fscache-cache.h 2011-11-15 19:59:43.000000000 -0500
61714@@ -116,7 +116,7 @@ struct fscache_operation {
61715 #endif
61716 };
61717
61718-extern atomic_t fscache_op_debug_id;
61719+extern atomic_unchecked_t fscache_op_debug_id;
61720 extern const struct slow_work_ops fscache_op_slow_work_ops;
61721
61722 extern void fscache_enqueue_operation(struct fscache_operation *);
61723@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
61724 fscache_operation_release_t release)
61725 {
61726 atomic_set(&op->usage, 1);
61727- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61728+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61729 op->release = release;
61730 INIT_LIST_HEAD(&op->pend_link);
61731 fscache_set_op_state(op, "Init");
61732diff -urNp linux-2.6.32.48/include/linux/fs.h linux-2.6.32.48/include/linux/fs.h
61733--- linux-2.6.32.48/include/linux/fs.h 2011-11-08 19:02:43.000000000 -0500
61734+++ linux-2.6.32.48/include/linux/fs.h 2011-11-15 19:59:43.000000000 -0500
61735@@ -90,6 +90,11 @@ struct inodes_stat_t {
61736 /* Expect random access pattern */
61737 #define FMODE_RANDOM ((__force fmode_t)4096)
61738
61739+/* Hack for grsec so as not to require read permission simply to execute
61740+ * a binary
61741+ */
61742+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
61743+
61744 /*
61745 * The below are the various read and write types that we support. Some of
61746 * them include behavioral modifiers that send information down to the
61747@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
61748 unsigned long, unsigned long);
61749
61750 struct address_space_operations {
61751- int (*writepage)(struct page *page, struct writeback_control *wbc);
61752- int (*readpage)(struct file *, struct page *);
61753- void (*sync_page)(struct page *);
61754+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
61755+ int (* const readpage)(struct file *, struct page *);
61756+ void (* const sync_page)(struct page *);
61757
61758 /* Write back some dirty pages from this mapping. */
61759- int (*writepages)(struct address_space *, struct writeback_control *);
61760+ int (* const writepages)(struct address_space *, struct writeback_control *);
61761
61762 /* Set a page dirty. Return true if this dirtied it */
61763- int (*set_page_dirty)(struct page *page);
61764+ int (* const set_page_dirty)(struct page *page);
61765
61766- int (*readpages)(struct file *filp, struct address_space *mapping,
61767+ int (* const readpages)(struct file *filp, struct address_space *mapping,
61768 struct list_head *pages, unsigned nr_pages);
61769
61770- int (*write_begin)(struct file *, struct address_space *mapping,
61771+ int (* const write_begin)(struct file *, struct address_space *mapping,
61772 loff_t pos, unsigned len, unsigned flags,
61773 struct page **pagep, void **fsdata);
61774- int (*write_end)(struct file *, struct address_space *mapping,
61775+ int (* const write_end)(struct file *, struct address_space *mapping,
61776 loff_t pos, unsigned len, unsigned copied,
61777 struct page *page, void *fsdata);
61778
61779 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
61780- sector_t (*bmap)(struct address_space *, sector_t);
61781- void (*invalidatepage) (struct page *, unsigned long);
61782- int (*releasepage) (struct page *, gfp_t);
61783- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
61784+ sector_t (* const bmap)(struct address_space *, sector_t);
61785+ void (* const invalidatepage) (struct page *, unsigned long);
61786+ int (* const releasepage) (struct page *, gfp_t);
61787+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
61788 loff_t offset, unsigned long nr_segs);
61789- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
61790+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
61791 void **, unsigned long *);
61792 /* migrate the contents of a page to the specified target */
61793- int (*migratepage) (struct address_space *,
61794+ int (* const migratepage) (struct address_space *,
61795 struct page *, struct page *);
61796- int (*launder_page) (struct page *);
61797- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
61798+ int (* const launder_page) (struct page *);
61799+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
61800 unsigned long);
61801- int (*error_remove_page)(struct address_space *, struct page *);
61802+ int (* const error_remove_page)(struct address_space *, struct page *);
61803 };
61804
61805 /*
61806@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
61807 typedef struct files_struct *fl_owner_t;
61808
61809 struct file_lock_operations {
61810- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
61811- void (*fl_release_private)(struct file_lock *);
61812+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
61813+ void (* const fl_release_private)(struct file_lock *);
61814 };
61815
61816 struct lock_manager_operations {
61817- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
61818- void (*fl_notify)(struct file_lock *); /* unblock callback */
61819- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
61820- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
61821- void (*fl_release_private)(struct file_lock *);
61822- void (*fl_break)(struct file_lock *);
61823- int (*fl_mylease)(struct file_lock *, struct file_lock *);
61824- int (*fl_change)(struct file_lock **, int);
61825+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
61826+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
61827+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
61828+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
61829+ void (* const fl_release_private)(struct file_lock *);
61830+ void (* const fl_break)(struct file_lock *);
61831+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
61832+ int (* const fl_change)(struct file_lock **, int);
61833 };
61834
61835 struct lock_manager {
61836@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
61837 unsigned int fi_flags; /* Flags as passed from user */
61838 unsigned int fi_extents_mapped; /* Number of mapped extents */
61839 unsigned int fi_extents_max; /* Size of fiemap_extent array */
61840- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
61841+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
61842 * array */
61843 };
61844 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
61845@@ -1512,7 +1517,8 @@ struct file_operations {
61846 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
61847 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
61848 int (*setlease)(struct file *, long, struct file_lock **);
61849-};
61850+} __do_const;
61851+typedef struct file_operations __no_const file_operations_no_const;
61852
61853 struct inode_operations {
61854 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
61855@@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
61856 unsigned long, loff_t *);
61857
61858 struct super_operations {
61859- struct inode *(*alloc_inode)(struct super_block *sb);
61860- void (*destroy_inode)(struct inode *);
61861+ struct inode *(* const alloc_inode)(struct super_block *sb);
61862+ void (* const destroy_inode)(struct inode *);
61863
61864- void (*dirty_inode) (struct inode *);
61865- int (*write_inode) (struct inode *, int);
61866- void (*drop_inode) (struct inode *);
61867- void (*delete_inode) (struct inode *);
61868- void (*put_super) (struct super_block *);
61869- void (*write_super) (struct super_block *);
61870- int (*sync_fs)(struct super_block *sb, int wait);
61871- int (*freeze_fs) (struct super_block *);
61872- int (*unfreeze_fs) (struct super_block *);
61873- int (*statfs) (struct dentry *, struct kstatfs *);
61874- int (*remount_fs) (struct super_block *, int *, char *);
61875- void (*clear_inode) (struct inode *);
61876- void (*umount_begin) (struct super_block *);
61877+ void (* const dirty_inode) (struct inode *);
61878+ int (* const write_inode) (struct inode *, int);
61879+ void (* const drop_inode) (struct inode *);
61880+ void (* const delete_inode) (struct inode *);
61881+ void (* const put_super) (struct super_block *);
61882+ void (* const write_super) (struct super_block *);
61883+ int (* const sync_fs)(struct super_block *sb, int wait);
61884+ int (* const freeze_fs) (struct super_block *);
61885+ int (* const unfreeze_fs) (struct super_block *);
61886+ int (* const statfs) (struct dentry *, struct kstatfs *);
61887+ int (* const remount_fs) (struct super_block *, int *, char *);
61888+ void (* const clear_inode) (struct inode *);
61889+ void (* const umount_begin) (struct super_block *);
61890
61891- int (*show_options)(struct seq_file *, struct vfsmount *);
61892- int (*show_stats)(struct seq_file *, struct vfsmount *);
61893+ int (* const show_options)(struct seq_file *, struct vfsmount *);
61894+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
61895 #ifdef CONFIG_QUOTA
61896- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
61897- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
61898+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
61899+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
61900 #endif
61901- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
61902+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
61903 };
61904
61905 /*
61906diff -urNp linux-2.6.32.48/include/linux/fs_struct.h linux-2.6.32.48/include/linux/fs_struct.h
61907--- linux-2.6.32.48/include/linux/fs_struct.h 2011-11-08 19:02:43.000000000 -0500
61908+++ linux-2.6.32.48/include/linux/fs_struct.h 2011-11-15 19:59:43.000000000 -0500
61909@@ -4,7 +4,7 @@
61910 #include <linux/path.h>
61911
61912 struct fs_struct {
61913- int users;
61914+ atomic_t users;
61915 rwlock_t lock;
61916 int umask;
61917 int in_exec;
61918diff -urNp linux-2.6.32.48/include/linux/ftrace_event.h linux-2.6.32.48/include/linux/ftrace_event.h
61919--- linux-2.6.32.48/include/linux/ftrace_event.h 2011-11-08 19:02:43.000000000 -0500
61920+++ linux-2.6.32.48/include/linux/ftrace_event.h 2011-11-15 19:59:43.000000000 -0500
61921@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
61922 int filter_type);
61923 extern int trace_define_common_fields(struct ftrace_event_call *call);
61924
61925-#define is_signed_type(type) (((type)(-1)) < 0)
61926+#define is_signed_type(type) (((type)(-1)) < (type)1)
61927
61928 int trace_set_clr_event(const char *system, const char *event, int set);
61929
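Comparing against (type)1 instead of the literal 0 keeps the expression warning-free for unsigned types and still gives the right answer for bool, where (bool)(-1) collapses to 1. A quick compile-time check of the behaviour (user-space C11 sketch):

#include <stdbool.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

_Static_assert(is_signed_type(int), "int is signed");
_Static_assert(!is_signed_type(unsigned int), "unsigned int is not");
_Static_assert(!is_signed_type(bool), "(bool)-1 == 1, so bool reads as unsigned");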
61930diff -urNp linux-2.6.32.48/include/linux/genhd.h linux-2.6.32.48/include/linux/genhd.h
61931--- linux-2.6.32.48/include/linux/genhd.h 2011-11-08 19:02:43.000000000 -0500
61932+++ linux-2.6.32.48/include/linux/genhd.h 2011-11-15 19:59:43.000000000 -0500
61933@@ -161,7 +161,7 @@ struct gendisk {
61934
61935 struct timer_rand_state *random;
61936
61937- atomic_t sync_io; /* RAID */
61938+ atomic_unchecked_t sync_io; /* RAID */
61939 struct work_struct async_notify;
61940 #ifdef CONFIG_BLK_DEV_INTEGRITY
61941 struct blk_integrity *integrity;
61942diff -urNp linux-2.6.32.48/include/linux/gracl.h linux-2.6.32.48/include/linux/gracl.h
61943--- linux-2.6.32.48/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
61944+++ linux-2.6.32.48/include/linux/gracl.h 2011-11-15 19:59:43.000000000 -0500
61945@@ -0,0 +1,317 @@
61946+#ifndef GR_ACL_H
61947+#define GR_ACL_H
61948+
61949+#include <linux/grdefs.h>
61950+#include <linux/resource.h>
61951+#include <linux/capability.h>
61952+#include <linux/dcache.h>
61953+#include <asm/resource.h>
61954+
61955+/* Major status information */
61956+
61957+#define GR_VERSION "grsecurity 2.2.2"
61958+#define GRSECURITY_VERSION 0x2202
61959+
61960+enum {
61961+ GR_SHUTDOWN = 0,
61962+ GR_ENABLE = 1,
61963+ GR_SPROLE = 2,
61964+ GR_RELOAD = 3,
61965+ GR_SEGVMOD = 4,
61966+ GR_STATUS = 5,
61967+ GR_UNSPROLE = 6,
61968+ GR_PASSSET = 7,
61969+ GR_SPROLEPAM = 8,
61970+};
61971+
61972+/* Password setup definitions
61973+ * kernel/grhash.c */
61974+enum {
61975+ GR_PW_LEN = 128,
61976+ GR_SALT_LEN = 16,
61977+ GR_SHA_LEN = 32,
61978+};
61979+
61980+enum {
61981+ GR_SPROLE_LEN = 64,
61982+};
61983+
61984+enum {
61985+ GR_NO_GLOB = 0,
61986+ GR_REG_GLOB,
61987+ GR_CREATE_GLOB
61988+};
61989+
61990+#define GR_NLIMITS 32
61991+
61992+/* Begin Data Structures */
61993+
61994+struct sprole_pw {
61995+ unsigned char *rolename;
61996+ unsigned char salt[GR_SALT_LEN];
61997+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61998+};
61999+
62000+struct name_entry {
62001+ __u32 key;
62002+ ino_t inode;
62003+ dev_t device;
62004+ char *name;
62005+ __u16 len;
62006+ __u8 deleted;
62007+ struct name_entry *prev;
62008+ struct name_entry *next;
62009+};
62010+
62011+struct inodev_entry {
62012+ struct name_entry *nentry;
62013+ struct inodev_entry *prev;
62014+ struct inodev_entry *next;
62015+};
62016+
62017+struct acl_role_db {
62018+ struct acl_role_label **r_hash;
62019+ __u32 r_size;
62020+};
62021+
62022+struct inodev_db {
62023+ struct inodev_entry **i_hash;
62024+ __u32 i_size;
62025+};
62026+
62027+struct name_db {
62028+ struct name_entry **n_hash;
62029+ __u32 n_size;
62030+};
62031+
62032+struct crash_uid {
62033+ uid_t uid;
62034+ unsigned long expires;
62035+};
62036+
62037+struct gr_hash_struct {
62038+ void **table;
62039+ void **nametable;
62040+ void *first;
62041+ __u32 table_size;
62042+ __u32 used_size;
62043+ int type;
62044+};
62045+
62046+/* Userspace Grsecurity ACL data structures */
62047+
62048+struct acl_subject_label {
62049+ char *filename;
62050+ ino_t inode;
62051+ dev_t device;
62052+ __u32 mode;
62053+ kernel_cap_t cap_mask;
62054+ kernel_cap_t cap_lower;
62055+ kernel_cap_t cap_invert_audit;
62056+
62057+ struct rlimit res[GR_NLIMITS];
62058+ __u32 resmask;
62059+
62060+ __u8 user_trans_type;
62061+ __u8 group_trans_type;
62062+ uid_t *user_transitions;
62063+ gid_t *group_transitions;
62064+ __u16 user_trans_num;
62065+ __u16 group_trans_num;
62066+
62067+ __u32 sock_families[2];
62068+ __u32 ip_proto[8];
62069+ __u32 ip_type;
62070+ struct acl_ip_label **ips;
62071+ __u32 ip_num;
62072+ __u32 inaddr_any_override;
62073+
62074+ __u32 crashes;
62075+ unsigned long expires;
62076+
62077+ struct acl_subject_label *parent_subject;
62078+ struct gr_hash_struct *hash;
62079+ struct acl_subject_label *prev;
62080+ struct acl_subject_label *next;
62081+
62082+ struct acl_object_label **obj_hash;
62083+ __u32 obj_hash_size;
62084+ __u16 pax_flags;
62085+};
62086+
62087+struct role_allowed_ip {
62088+ __u32 addr;
62089+ __u32 netmask;
62090+
62091+ struct role_allowed_ip *prev;
62092+ struct role_allowed_ip *next;
62093+};
62094+
62095+struct role_transition {
62096+ char *rolename;
62097+
62098+ struct role_transition *prev;
62099+ struct role_transition *next;
62100+};
62101+
62102+struct acl_role_label {
62103+ char *rolename;
62104+ uid_t uidgid;
62105+ __u16 roletype;
62106+
62107+ __u16 auth_attempts;
62108+ unsigned long expires;
62109+
62110+ struct acl_subject_label *root_label;
62111+ struct gr_hash_struct *hash;
62112+
62113+ struct acl_role_label *prev;
62114+ struct acl_role_label *next;
62115+
62116+ struct role_transition *transitions;
62117+ struct role_allowed_ip *allowed_ips;
62118+ uid_t *domain_children;
62119+ __u16 domain_child_num;
62120+
62121+ struct acl_subject_label **subj_hash;
62122+ __u32 subj_hash_size;
62123+};
62124+
62125+struct user_acl_role_db {
62126+ struct acl_role_label **r_table;
62127+ __u32 num_pointers; /* Number of allocations to track */
62128+ __u32 num_roles; /* Number of roles */
62129+ __u32 num_domain_children; /* Number of domain children */
62130+ __u32 num_subjects; /* Number of subjects */
62131+ __u32 num_objects; /* Number of objects */
62132+};
62133+
62134+struct acl_object_label {
62135+ char *filename;
62136+ ino_t inode;
62137+ dev_t device;
62138+ __u32 mode;
62139+
62140+ struct acl_subject_label *nested;
62141+ struct acl_object_label *globbed;
62142+
62143+ /* next two structures not used */
62144+
62145+ struct acl_object_label *prev;
62146+ struct acl_object_label *next;
62147+};
62148+
62149+struct acl_ip_label {
62150+ char *iface;
62151+ __u32 addr;
62152+ __u32 netmask;
62153+ __u16 low, high;
62154+ __u8 mode;
62155+ __u32 type;
62156+ __u32 proto[8];
62157+
62158+ /* next two structures not used */
62159+
62160+ struct acl_ip_label *prev;
62161+ struct acl_ip_label *next;
62162+};
62163+
62164+struct gr_arg {
62165+ struct user_acl_role_db role_db;
62166+ unsigned char pw[GR_PW_LEN];
62167+ unsigned char salt[GR_SALT_LEN];
62168+ unsigned char sum[GR_SHA_LEN];
62169+ unsigned char sp_role[GR_SPROLE_LEN];
62170+ struct sprole_pw *sprole_pws;
62171+ dev_t segv_device;
62172+ ino_t segv_inode;
62173+ uid_t segv_uid;
62174+ __u16 num_sprole_pws;
62175+ __u16 mode;
62176+};
62177+
62178+struct gr_arg_wrapper {
62179+ struct gr_arg *arg;
62180+ __u32 version;
62181+ __u32 size;
62182+};
62183+
62184+struct subject_map {
62185+ struct acl_subject_label *user;
62186+ struct acl_subject_label *kernel;
62187+ struct subject_map *prev;
62188+ struct subject_map *next;
62189+};
62190+
62191+struct acl_subj_map_db {
62192+ struct subject_map **s_hash;
62193+ __u32 s_size;
62194+};
62195+
62196+/* End Data Structures Section */
62197+
62198+/* Hash functions generated by empirical testing by Brad Spengler
62199+ Makes good use of the low bits of the inode. Generally 0-1 times
62200+ in loop for successful match. 0-3 for unsuccessful match.
62201+ Shift/add algorithm with modulus of table size and an XOR*/
62202+
62203+static __inline__ unsigned int
62204+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
62205+{
62206+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
62207+}
62208+
62209+static __inline__ unsigned int
62210+shash(const struct acl_subject_label *userp, const unsigned int sz)
62211+{
62212+ return ((const unsigned long)userp % sz);
62213+}
62214+
62215+static __inline__ unsigned int
62216+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
62217+{
62218+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
62219+}
62220+
62221+static __inline__ unsigned int
62222+nhash(const char *name, const __u16 len, const unsigned int sz)
62223+{
62224+ return full_name_hash((const unsigned char *)name, len) % sz;
62225+}
62226+
62227+#define FOR_EACH_ROLE_START(role) \
62228+ role = role_list; \
62229+ while (role) {
62230+
62231+#define FOR_EACH_ROLE_END(role) \
62232+ role = role->prev; \
62233+ }
62234+
62235+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
62236+ subj = NULL; \
62237+ iter = 0; \
62238+ while (iter < role->subj_hash_size) { \
62239+ if (subj == NULL) \
62240+ subj = role->subj_hash[iter]; \
62241+ if (subj == NULL) { \
62242+ iter++; \
62243+ continue; \
62244+ }
62245+
62246+#define FOR_EACH_SUBJECT_END(subj,iter) \
62247+ subj = subj->next; \
62248+ if (subj == NULL) \
62249+ iter++; \
62250+ }
62251+
62252+
62253+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
62254+ subj = role->hash->first; \
62255+ while (subj != NULL) {
62256+
62257+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
62258+ subj = subj->next; \
62259+ }
62260+
62261+#endif
62262+
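The inline hash helpers above index the bucket arrays of the *_db structures declared earlier in this header, with collisions resolved by walking a chain inside the bucket. A hypothetical lookup against a name_db, sketched only to show how nhash() and the chained name_entry records fit together; the field usage is assumed, not lifted from the ACL code:

static struct name_entry *
lookup_name(struct name_db *db, const char *name, __u16 len)
{
	struct name_entry *match = db->n_hash[nhash(name, len, db->n_size)];

	/* walk the per-bucket chain until both length and bytes match */
	while (match && (match->len != len || memcmp(match->name, name, len)))
		match = match->next;
	return match;
}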
62263diff -urNp linux-2.6.32.48/include/linux/gralloc.h linux-2.6.32.48/include/linux/gralloc.h
62264--- linux-2.6.32.48/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
62265+++ linux-2.6.32.48/include/linux/gralloc.h 2011-11-15 19:59:43.000000000 -0500
62266@@ -0,0 +1,9 @@
62267+#ifndef __GRALLOC_H
62268+#define __GRALLOC_H
62269+
62270+void acl_free_all(void);
62271+int acl_alloc_stack_init(unsigned long size);
62272+void *acl_alloc(unsigned long len);
62273+void *acl_alloc_num(unsigned long num, unsigned long len);
62274+
62275+#endif
62276diff -urNp linux-2.6.32.48/include/linux/grdefs.h linux-2.6.32.48/include/linux/grdefs.h
62277--- linux-2.6.32.48/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
62278+++ linux-2.6.32.48/include/linux/grdefs.h 2011-11-15 19:59:43.000000000 -0500
62279@@ -0,0 +1,140 @@
62280+#ifndef GRDEFS_H
62281+#define GRDEFS_H
62282+
62283+/* Begin grsecurity status declarations */
62284+
62285+enum {
62286+ GR_READY = 0x01,
62287+ GR_STATUS_INIT = 0x00 // disabled state
62288+};
62289+
62290+/* Begin ACL declarations */
62291+
62292+/* Role flags */
62293+
62294+enum {
62295+ GR_ROLE_USER = 0x0001,
62296+ GR_ROLE_GROUP = 0x0002,
62297+ GR_ROLE_DEFAULT = 0x0004,
62298+ GR_ROLE_SPECIAL = 0x0008,
62299+ GR_ROLE_AUTH = 0x0010,
62300+ GR_ROLE_NOPW = 0x0020,
62301+ GR_ROLE_GOD = 0x0040,
62302+ GR_ROLE_LEARN = 0x0080,
62303+ GR_ROLE_TPE = 0x0100,
62304+ GR_ROLE_DOMAIN = 0x0200,
62305+ GR_ROLE_PAM = 0x0400,
62306+ GR_ROLE_PERSIST = 0x800
62307+};
62308+
62309+/* ACL Subject and Object mode flags */
62310+enum {
62311+ GR_DELETED = 0x80000000
62312+};
62313+
62314+/* ACL Object-only mode flags */
62315+enum {
62316+ GR_READ = 0x00000001,
62317+ GR_APPEND = 0x00000002,
62318+ GR_WRITE = 0x00000004,
62319+ GR_EXEC = 0x00000008,
62320+ GR_FIND = 0x00000010,
62321+ GR_INHERIT = 0x00000020,
62322+ GR_SETID = 0x00000040,
62323+ GR_CREATE = 0x00000080,
62324+ GR_DELETE = 0x00000100,
62325+ GR_LINK = 0x00000200,
62326+ GR_AUDIT_READ = 0x00000400,
62327+ GR_AUDIT_APPEND = 0x00000800,
62328+ GR_AUDIT_WRITE = 0x00001000,
62329+ GR_AUDIT_EXEC = 0x00002000,
62330+ GR_AUDIT_FIND = 0x00004000,
62331+ GR_AUDIT_INHERIT= 0x00008000,
62332+ GR_AUDIT_SETID = 0x00010000,
62333+ GR_AUDIT_CREATE = 0x00020000,
62334+ GR_AUDIT_DELETE = 0x00040000,
62335+ GR_AUDIT_LINK = 0x00080000,
62336+ GR_PTRACERD = 0x00100000,
62337+ GR_NOPTRACE = 0x00200000,
62338+ GR_SUPPRESS = 0x00400000,
62339+ GR_NOLEARN = 0x00800000,
62340+ GR_INIT_TRANSFER= 0x01000000
62341+};
62342+
62343+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
62344+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
62345+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
62346+
62347+/* ACL subject-only mode flags */
62348+enum {
62349+ GR_KILL = 0x00000001,
62350+ GR_VIEW = 0x00000002,
62351+ GR_PROTECTED = 0x00000004,
62352+ GR_LEARN = 0x00000008,
62353+ GR_OVERRIDE = 0x00000010,
62354+ /* just a placeholder, this mode is only used in userspace */
62355+ GR_DUMMY = 0x00000020,
62356+ GR_PROTSHM = 0x00000040,
62357+ GR_KILLPROC = 0x00000080,
62358+ GR_KILLIPPROC = 0x00000100,
62359+ /* just a placeholder, this mode is only used in userspace */
62360+ GR_NOTROJAN = 0x00000200,
62361+ GR_PROTPROCFD = 0x00000400,
62362+ GR_PROCACCT = 0x00000800,
62363+ GR_RELAXPTRACE = 0x00001000,
62364+ GR_NESTED = 0x00002000,
62365+ GR_INHERITLEARN = 0x00004000,
62366+ GR_PROCFIND = 0x00008000,
62367+ GR_POVERRIDE = 0x00010000,
62368+ GR_KERNELAUTH = 0x00020000,
62369+ GR_ATSECURE = 0x00040000,
62370+ GR_SHMEXEC = 0x00080000
62371+};
62372+
62373+enum {
62374+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
62375+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
62376+ GR_PAX_ENABLE_MPROTECT = 0x0004,
62377+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
62378+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
62379+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
62380+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
62381+ GR_PAX_DISABLE_MPROTECT = 0x0400,
62382+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
62383+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
62384+};
62385+
62386+enum {
62387+ GR_ID_USER = 0x01,
62388+ GR_ID_GROUP = 0x02,
62389+};
62390+
62391+enum {
62392+ GR_ID_ALLOW = 0x01,
62393+ GR_ID_DENY = 0x02,
62394+};
62395+
62396+#define GR_CRASH_RES 31
62397+#define GR_UIDTABLE_MAX 500
62398+
62399+/* begin resource learning section */
62400+enum {
62401+ GR_RLIM_CPU_BUMP = 60,
62402+ GR_RLIM_FSIZE_BUMP = 50000,
62403+ GR_RLIM_DATA_BUMP = 10000,
62404+ GR_RLIM_STACK_BUMP = 1000,
62405+ GR_RLIM_CORE_BUMP = 10000,
62406+ GR_RLIM_RSS_BUMP = 500000,
62407+ GR_RLIM_NPROC_BUMP = 1,
62408+ GR_RLIM_NOFILE_BUMP = 5,
62409+ GR_RLIM_MEMLOCK_BUMP = 50000,
62410+ GR_RLIM_AS_BUMP = 500000,
62411+ GR_RLIM_LOCKS_BUMP = 2,
62412+ GR_RLIM_SIGPENDING_BUMP = 5,
62413+ GR_RLIM_MSGQUEUE_BUMP = 10000,
62414+ GR_RLIM_NICE_BUMP = 1,
62415+ GR_RLIM_RTPRIO_BUMP = 1,
62416+ GR_RLIM_RTTIME_BUMP = 1000000
62417+};
62418+
62419+#endif
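A point worth noting about the object-mode bit layout above: each GR_AUDIT_* flag sits exactly ten bits above its matching access flag (GR_READ 0x1 -> GR_AUDIT_READ 0x400, GR_WRITE 0x4 -> GR_AUDIT_WRITE 0x1000, and so on). Below is a minimal sketch of a conversion helper built on that layout; the name to_gr_audit_sketch and the shift constant are illustrative assumptions, not the to_gr_audit() the patch actually declares in grinternal.h further down.

        /* illustrative only: map requested access bits to their audit bits */
        #define GR_AUDIT_SHIFT 10   /* assumed from the enum values above */

        static inline __u32 to_gr_audit_sketch(__u32 reqmode)
        {
                /* GR_READ..GR_LINK shifted left by 10 land on GR_AUDIT_READ..GR_AUDIT_LINK */
                return (reqmode << GR_AUDIT_SHIFT) & GR_AUDITS;
        }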
62420diff -urNp linux-2.6.32.48/include/linux/grinternal.h linux-2.6.32.48/include/linux/grinternal.h
62421--- linux-2.6.32.48/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
62422+++ linux-2.6.32.48/include/linux/grinternal.h 2011-11-15 19:59:43.000000000 -0500
62423@@ -0,0 +1,218 @@
62424+#ifndef __GRINTERNAL_H
62425+#define __GRINTERNAL_H
62426+
62427+#ifdef CONFIG_GRKERNSEC
62428+
62429+#include <linux/fs.h>
62430+#include <linux/mnt_namespace.h>
62431+#include <linux/nsproxy.h>
62432+#include <linux/gracl.h>
62433+#include <linux/grdefs.h>
62434+#include <linux/grmsg.h>
62435+
62436+void gr_add_learn_entry(const char *fmt, ...)
62437+ __attribute__ ((format (printf, 1, 2)));
62438+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62439+ const struct vfsmount *mnt);
62440+__u32 gr_check_create(const struct dentry *new_dentry,
62441+ const struct dentry *parent,
62442+ const struct vfsmount *mnt, const __u32 mode);
62443+int gr_check_protected_task(const struct task_struct *task);
62444+__u32 to_gr_audit(const __u32 reqmode);
62445+int gr_set_acls(const int type);
62446+int gr_apply_subject_to_task(struct task_struct *task);
62447+int gr_acl_is_enabled(void);
62448+char gr_roletype_to_char(void);
62449+
62450+void gr_handle_alertkill(struct task_struct *task);
62451+char *gr_to_filename(const struct dentry *dentry,
62452+ const struct vfsmount *mnt);
62453+char *gr_to_filename1(const struct dentry *dentry,
62454+ const struct vfsmount *mnt);
62455+char *gr_to_filename2(const struct dentry *dentry,
62456+ const struct vfsmount *mnt);
62457+char *gr_to_filename3(const struct dentry *dentry,
62458+ const struct vfsmount *mnt);
62459+
62460+extern int grsec_enable_harden_ptrace;
62461+extern int grsec_enable_link;
62462+extern int grsec_enable_fifo;
62463+extern int grsec_enable_shm;
62464+extern int grsec_enable_execlog;
62465+extern int grsec_enable_signal;
62466+extern int grsec_enable_audit_ptrace;
62467+extern int grsec_enable_forkfail;
62468+extern int grsec_enable_time;
62469+extern int grsec_enable_rofs;
62470+extern int grsec_enable_chroot_shmat;
62471+extern int grsec_enable_chroot_mount;
62472+extern int grsec_enable_chroot_double;
62473+extern int grsec_enable_chroot_pivot;
62474+extern int grsec_enable_chroot_chdir;
62475+extern int grsec_enable_chroot_chmod;
62476+extern int grsec_enable_chroot_mknod;
62477+extern int grsec_enable_chroot_fchdir;
62478+extern int grsec_enable_chroot_nice;
62479+extern int grsec_enable_chroot_execlog;
62480+extern int grsec_enable_chroot_caps;
62481+extern int grsec_enable_chroot_sysctl;
62482+extern int grsec_enable_chroot_unix;
62483+extern int grsec_enable_tpe;
62484+extern int grsec_tpe_gid;
62485+extern int grsec_enable_tpe_all;
62486+extern int grsec_enable_tpe_invert;
62487+extern int grsec_enable_socket_all;
62488+extern int grsec_socket_all_gid;
62489+extern int grsec_enable_socket_client;
62490+extern int grsec_socket_client_gid;
62491+extern int grsec_enable_socket_server;
62492+extern int grsec_socket_server_gid;
62493+extern int grsec_audit_gid;
62494+extern int grsec_enable_group;
62495+extern int grsec_enable_audit_textrel;
62496+extern int grsec_enable_log_rwxmaps;
62497+extern int grsec_enable_mount;
62498+extern int grsec_enable_chdir;
62499+extern int grsec_resource_logging;
62500+extern int grsec_enable_blackhole;
62501+extern int grsec_lastack_retries;
62502+extern int grsec_enable_brute;
62503+extern int grsec_lock;
62504+
62505+extern spinlock_t grsec_alert_lock;
62506+extern unsigned long grsec_alert_wtime;
62507+extern unsigned long grsec_alert_fyet;
62508+
62509+extern spinlock_t grsec_audit_lock;
62510+
62511+extern rwlock_t grsec_exec_file_lock;
62512+
62513+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62514+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62515+ (tsk)->exec_file->f_vfsmnt) : "/")
62516+
62517+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62518+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62519+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62520+
62521+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62522+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
62523+ (tsk)->exec_file->f_vfsmnt) : "/")
62524+
62525+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62526+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62527+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62528+
62529+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62530+
62531+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62532+
62533+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62534+ (task)->pid, (cred)->uid, \
62535+ (cred)->euid, (cred)->gid, (cred)->egid, \
62536+ gr_parent_task_fullpath(task), \
62537+ (task)->real_parent->comm, (task)->real_parent->pid, \
62538+ (pcred)->uid, (pcred)->euid, \
62539+ (pcred)->gid, (pcred)->egid
62540+
62541+#define GR_CHROOT_CAPS {{ \
62542+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62543+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62544+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62545+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62546+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62547+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62548+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
62549+
62550+#define security_learn(normal_msg,args...) \
62551+({ \
62552+ read_lock(&grsec_exec_file_lock); \
62553+ gr_add_learn_entry(normal_msg "\n", ## args); \
62554+ read_unlock(&grsec_exec_file_lock); \
62555+})
62556+
62557+enum {
62558+ GR_DO_AUDIT,
62559+ GR_DONT_AUDIT,
62560+ GR_DONT_AUDIT_GOOD
62561+};
62562+
62563+enum {
62564+ GR_TTYSNIFF,
62565+ GR_RBAC,
62566+ GR_RBAC_STR,
62567+ GR_STR_RBAC,
62568+ GR_RBAC_MODE2,
62569+ GR_RBAC_MODE3,
62570+ GR_FILENAME,
62571+ GR_SYSCTL_HIDDEN,
62572+ GR_NOARGS,
62573+ GR_ONE_INT,
62574+ GR_ONE_INT_TWO_STR,
62575+ GR_ONE_STR,
62576+ GR_STR_INT,
62577+ GR_TWO_STR_INT,
62578+ GR_TWO_INT,
62579+ GR_TWO_U64,
62580+ GR_THREE_INT,
62581+ GR_FIVE_INT_TWO_STR,
62582+ GR_TWO_STR,
62583+ GR_THREE_STR,
62584+ GR_FOUR_STR,
62585+ GR_STR_FILENAME,
62586+ GR_FILENAME_STR,
62587+ GR_FILENAME_TWO_INT,
62588+ GR_FILENAME_TWO_INT_STR,
62589+ GR_TEXTREL,
62590+ GR_PTRACE,
62591+ GR_RESOURCE,
62592+ GR_CAP,
62593+ GR_SIG,
62594+ GR_SIG2,
62595+ GR_CRASH1,
62596+ GR_CRASH2,
62597+ GR_PSACCT,
62598+ GR_RWXMAP
62599+};
62600+
62601+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62602+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62603+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62604+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62605+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62606+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62607+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62608+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62609+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62610+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62611+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62612+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62613+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62614+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62615+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62616+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62617+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62618+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62619+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62620+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62621+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62622+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62623+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62624+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62625+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62626+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62627+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62628+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62629+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62630+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62631+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62632+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62633+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62634+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62635+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62636+
62637+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62638+
62639+#endif
62640+
62641+#endif
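The gr_log_* wrapper macros above encode each call's argument shape in the argtypes enum (GR_ONE_STR, GR_TWO_INT, ...) so that the single varargs sink gr_log_varargs() knows how to walk its va_list. A minimal consumer-side sketch follows; the body is an assumption for illustration only, and it ignores the audit level, whereas the real gr_log_varargs() lives in grsec/grsec_log.c and also appends the DEFAULTSECARGS fields.

        /* illustrative sketch of the argtypes dispatch; not the patch's implementation */
        static void gr_log_varargs_sketch(int audit, const char *msg, int argtypes, ...)
        {
                char buf[256];
                va_list ap;

                va_start(ap, argtypes);
                switch (argtypes) {
                case GR_ONE_STR:
                        snprintf(buf, sizeof(buf), msg, va_arg(ap, char *));
                        break;
                case GR_TWO_INT: {
                        int a = va_arg(ap, int);
                        int b = va_arg(ap, int);
                        snprintf(buf, sizeof(buf), msg, a, b);
                        break;
                }
                default:
                        snprintf(buf, sizeof(buf), "%s", msg);
                        break;
                }
                va_end(ap);

                printk(KERN_ALERT "grsec: %s\n", buf); /* audit level ignored in this sketch */
        }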
62642diff -urNp linux-2.6.32.48/include/linux/grmsg.h linux-2.6.32.48/include/linux/grmsg.h
62643--- linux-2.6.32.48/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
62644+++ linux-2.6.32.48/include/linux/grmsg.h 2011-11-15 19:59:43.000000000 -0500
62645@@ -0,0 +1,108 @@
62646+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62647+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62648+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62649+#define GR_STOPMOD_MSG "denied modification of module state by "
62650+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62651+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62652+#define GR_IOPERM_MSG "denied use of ioperm() by "
62653+#define GR_IOPL_MSG "denied use of iopl() by "
62654+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62655+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62656+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62657+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62658+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62659+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62660+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62661+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62662+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62663+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62664+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62665+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62666+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62667+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62668+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62669+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62670+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62671+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62672+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62673+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62674+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62675+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62676+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62677+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62678+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62679+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62680+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
62681+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62682+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62683+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62684+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62685+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62686+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62687+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62688+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62689+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
62690+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62691+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62692+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62693+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62694+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62695+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62696+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62697+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62698+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62699+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62700+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62701+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62702+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62703+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62704+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62705+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62706+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62707+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62708+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62709+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62710+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62711+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62712+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62713+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62714+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62715+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62716+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62717+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62718+#define GR_FAILFORK_MSG "failed fork with errno %s by "
62719+#define GR_NICE_CHROOT_MSG "denied priority change by "
62720+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62721+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62722+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62723+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62724+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62725+#define GR_TIME_MSG "time set by "
62726+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62727+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62728+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62729+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62730+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62731+#define GR_BIND_MSG "denied bind() by "
62732+#define GR_CONNECT_MSG "denied connect() by "
62733+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62734+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62735+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62736+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62737+#define GR_CAP_ACL_MSG "use of %s denied for "
62738+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62739+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62740+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62741+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62742+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62743+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62744+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62745+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62746+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62747+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62748+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62749+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62750+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62751+#define GR_VM86_MSG "denied use of vm86 by "
62752+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62753+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
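Most of the formats above deliberately end in "by " with a trailing space: at log time the caller appends DEFAULTSECMSG and fills it with the DEFAULTSECARGS() fields from grinternal.h, so the acting task, its credentials, and its parent are always printed last. A sketch of the resulting call shape is below; the printk line only illustrates the string-literal concatenation convention (task, cred, and pcred are assumed locals), it is not code the patch adds.

        /* illustrative only: how a "... by " format pairs with DEFAULTSECMSG */
        printk(KERN_ALERT "grsec: " GR_CHDIR_AUDIT_MSG DEFAULTSECMSG "\n",
               "/tmp", DEFAULTSECARGS(task, cred, pcred));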
62754diff -urNp linux-2.6.32.48/include/linux/grsecurity.h linux-2.6.32.48/include/linux/grsecurity.h
62755--- linux-2.6.32.48/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
62756+++ linux-2.6.32.48/include/linux/grsecurity.h 2011-11-15 19:59:43.000000000 -0500
62757@@ -0,0 +1,218 @@
62758+#ifndef GR_SECURITY_H
62759+#define GR_SECURITY_H
62760+#include <linux/fs.h>
62761+#include <linux/fs_struct.h>
62762+#include <linux/binfmts.h>
62763+#include <linux/gracl.h>
62764+#include <linux/compat.h>
62765+
62766+/* notify of brain-dead configs */
62767+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62768+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62769+#endif
62770+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62771+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62772+#endif
62773+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
62774+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
62775+#endif
62776+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
62777+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
62778+#endif
62779+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62780+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62781+#endif
62782+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62783+#error "CONFIG_PAX enabled, but no PaX options are enabled."
62784+#endif
62785+
62786+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62787+void gr_handle_brute_check(void);
62788+void gr_handle_kernel_exploit(void);
62789+int gr_process_user_ban(void);
62790+
62791+char gr_roletype_to_char(void);
62792+
62793+int gr_acl_enable_at_secure(void);
62794+
62795+int gr_check_user_change(int real, int effective, int fs);
62796+int gr_check_group_change(int real, int effective, int fs);
62797+
62798+void gr_del_task_from_ip_table(struct task_struct *p);
62799+
62800+int gr_pid_is_chrooted(struct task_struct *p);
62801+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62802+int gr_handle_chroot_nice(void);
62803+int gr_handle_chroot_sysctl(const int op);
62804+int gr_handle_chroot_setpriority(struct task_struct *p,
62805+ const int niceval);
62806+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62807+int gr_handle_chroot_chroot(const struct dentry *dentry,
62808+ const struct vfsmount *mnt);
62809+void gr_handle_chroot_chdir(struct path *path);
62810+int gr_handle_chroot_chmod(const struct dentry *dentry,
62811+ const struct vfsmount *mnt, const int mode);
62812+int gr_handle_chroot_mknod(const struct dentry *dentry,
62813+ const struct vfsmount *mnt, const int mode);
62814+int gr_handle_chroot_mount(const struct dentry *dentry,
62815+ const struct vfsmount *mnt,
62816+ const char *dev_name);
62817+int gr_handle_chroot_pivot(void);
62818+int gr_handle_chroot_unix(const pid_t pid);
62819+
62820+int gr_handle_rawio(const struct inode *inode);
62821+
62822+void gr_handle_ioperm(void);
62823+void gr_handle_iopl(void);
62824+
62825+int gr_tpe_allow(const struct file *file);
62826+
62827+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62828+void gr_clear_chroot_entries(struct task_struct *task);
62829+
62830+void gr_log_forkfail(const int retval);
62831+void gr_log_timechange(void);
62832+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62833+void gr_log_chdir(const struct dentry *dentry,
62834+ const struct vfsmount *mnt);
62835+void gr_log_chroot_exec(const struct dentry *dentry,
62836+ const struct vfsmount *mnt);
62837+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
62838+#ifdef CONFIG_COMPAT
62839+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
62840+#endif
62841+void gr_log_remount(const char *devname, const int retval);
62842+void gr_log_unmount(const char *devname, const int retval);
62843+void gr_log_mount(const char *from, const char *to, const int retval);
62844+void gr_log_textrel(struct vm_area_struct *vma);
62845+void gr_log_rwxmmap(struct file *file);
62846+void gr_log_rwxmprotect(struct file *file);
62847+
62848+int gr_handle_follow_link(const struct inode *parent,
62849+ const struct inode *inode,
62850+ const struct dentry *dentry,
62851+ const struct vfsmount *mnt);
62852+int gr_handle_fifo(const struct dentry *dentry,
62853+ const struct vfsmount *mnt,
62854+ const struct dentry *dir, const int flag,
62855+ const int acc_mode);
62856+int gr_handle_hardlink(const struct dentry *dentry,
62857+ const struct vfsmount *mnt,
62858+ struct inode *inode,
62859+ const int mode, const char *to);
62860+
62861+int gr_is_capable(const int cap);
62862+int gr_is_capable_nolog(const int cap);
62863+void gr_learn_resource(const struct task_struct *task, const int limit,
62864+ const unsigned long wanted, const int gt);
62865+void gr_copy_label(struct task_struct *tsk);
62866+void gr_handle_crash(struct task_struct *task, const int sig);
62867+int gr_handle_signal(const struct task_struct *p, const int sig);
62868+int gr_check_crash_uid(const uid_t uid);
62869+int gr_check_protected_task(const struct task_struct *task);
62870+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62871+int gr_acl_handle_mmap(const struct file *file,
62872+ const unsigned long prot);
62873+int gr_acl_handle_mprotect(const struct file *file,
62874+ const unsigned long prot);
62875+int gr_check_hidden_task(const struct task_struct *tsk);
62876+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62877+ const struct vfsmount *mnt);
62878+__u32 gr_acl_handle_utime(const struct dentry *dentry,
62879+ const struct vfsmount *mnt);
62880+__u32 gr_acl_handle_access(const struct dentry *dentry,
62881+ const struct vfsmount *mnt, const int fmode);
62882+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
62883+ const struct vfsmount *mnt, mode_t mode);
62884+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62885+ const struct vfsmount *mnt, mode_t mode);
62886+__u32 gr_acl_handle_chown(const struct dentry *dentry,
62887+ const struct vfsmount *mnt);
62888+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62889+ const struct vfsmount *mnt);
62890+int gr_handle_ptrace(struct task_struct *task, const long request);
62891+int gr_handle_proc_ptrace(struct task_struct *task);
62892+__u32 gr_acl_handle_execve(const struct dentry *dentry,
62893+ const struct vfsmount *mnt);
62894+int gr_check_crash_exec(const struct file *filp);
62895+int gr_acl_is_enabled(void);
62896+void gr_set_kernel_label(struct task_struct *task);
62897+void gr_set_role_label(struct task_struct *task, const uid_t uid,
62898+ const gid_t gid);
62899+int gr_set_proc_label(const struct dentry *dentry,
62900+ const struct vfsmount *mnt,
62901+ const int unsafe_share);
62902+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62903+ const struct vfsmount *mnt);
62904+__u32 gr_acl_handle_open(const struct dentry *dentry,
62905+ const struct vfsmount *mnt, const int fmode);
62906+__u32 gr_acl_handle_creat(const struct dentry *dentry,
62907+ const struct dentry *p_dentry,
62908+ const struct vfsmount *p_mnt, const int fmode,
62909+ const int imode);
62910+void gr_handle_create(const struct dentry *dentry,
62911+ const struct vfsmount *mnt);
62912+void gr_handle_proc_create(const struct dentry *dentry,
62913+ const struct inode *inode);
62914+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62915+ const struct dentry *parent_dentry,
62916+ const struct vfsmount *parent_mnt,
62917+ const int mode);
62918+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62919+ const struct dentry *parent_dentry,
62920+ const struct vfsmount *parent_mnt);
62921+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62922+ const struct vfsmount *mnt);
62923+void gr_handle_delete(const ino_t ino, const dev_t dev);
62924+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62925+ const struct vfsmount *mnt);
62926+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62927+ const struct dentry *parent_dentry,
62928+ const struct vfsmount *parent_mnt,
62929+ const char *from);
62930+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62931+ const struct dentry *parent_dentry,
62932+ const struct vfsmount *parent_mnt,
62933+ const struct dentry *old_dentry,
62934+ const struct vfsmount *old_mnt, const char *to);
62935+int gr_acl_handle_rename(struct dentry *new_dentry,
62936+ struct dentry *parent_dentry,
62937+ const struct vfsmount *parent_mnt,
62938+ struct dentry *old_dentry,
62939+ struct inode *old_parent_inode,
62940+ struct vfsmount *old_mnt, const char *newname);
62941+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62942+ struct dentry *old_dentry,
62943+ struct dentry *new_dentry,
62944+ struct vfsmount *mnt, const __u8 replace);
62945+__u32 gr_check_link(const struct dentry *new_dentry,
62946+ const struct dentry *parent_dentry,
62947+ const struct vfsmount *parent_mnt,
62948+ const struct dentry *old_dentry,
62949+ const struct vfsmount *old_mnt);
62950+int gr_acl_handle_filldir(const struct file *file, const char *name,
62951+ const unsigned int namelen, const ino_t ino);
62952+
62953+__u32 gr_acl_handle_unix(const struct dentry *dentry,
62954+ const struct vfsmount *mnt);
62955+void gr_acl_handle_exit(void);
62956+void gr_acl_handle_psacct(struct task_struct *task, const long code);
62957+int gr_acl_handle_procpidmem(const struct task_struct *task);
62958+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62959+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62960+void gr_audit_ptrace(struct task_struct *task);
62961+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62962+
62963+#ifdef CONFIG_GRKERNSEC
62964+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62965+void gr_handle_vm86(void);
62966+void gr_handle_mem_readwrite(u64 from, u64 to);
62967+
62968+extern int grsec_enable_dmesg;
62969+extern int grsec_disable_privio;
62970+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62971+extern int grsec_enable_chroot_findtask;
62972+#endif
62973+#endif
62974+
62975+#endif
62976diff -urNp linux-2.6.32.48/include/linux/hdpu_features.h linux-2.6.32.48/include/linux/hdpu_features.h
62977--- linux-2.6.32.48/include/linux/hdpu_features.h 2011-11-08 19:02:43.000000000 -0500
62978+++ linux-2.6.32.48/include/linux/hdpu_features.h 2011-11-15 19:59:43.000000000 -0500
62979@@ -3,7 +3,7 @@
62980 struct cpustate_t {
62981 spinlock_t lock;
62982 int excl;
62983- int open_count;
62984+ atomic_t open_count;
62985 unsigned char cached_val;
62986 int inited;
62987 unsigned long *set_addr;
62988diff -urNp linux-2.6.32.48/include/linux/highmem.h linux-2.6.32.48/include/linux/highmem.h
62989--- linux-2.6.32.48/include/linux/highmem.h 2011-11-08 19:02:43.000000000 -0500
62990+++ linux-2.6.32.48/include/linux/highmem.h 2011-11-15 19:59:43.000000000 -0500
62991@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
62992 kunmap_atomic(kaddr, KM_USER0);
62993 }
62994
62995+static inline void sanitize_highpage(struct page *page)
62996+{
62997+ void *kaddr;
62998+ unsigned long flags;
62999+
63000+ local_irq_save(flags);
63001+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
63002+ clear_page(kaddr);
63003+ kunmap_atomic(kaddr, KM_CLEARPAGE);
63004+ local_irq_restore(flags);
63005+}
63006+
63007 static inline void zero_user_segments(struct page *page,
63008 unsigned start1, unsigned end1,
63009 unsigned start2, unsigned end2)
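sanitize_highpage() added above is the page-clearing helper used by the PAX_MEMORY_SANITIZE side of this patch: pages are scrubbed as they are freed so stale data does not linger in the allocator, and interrupts are disabled around the KM_CLEARPAGE mapping, presumably so the atomic kmap slot cannot be reused from interrupt context mid-clear. A hypothetical call-site sketch follows; the function name free_one_page_sketch is an assumption, and the real hook sits in mm/page_alloc.c.

        /* illustrative only: scrub page contents at free time */
        static void free_one_page_sketch(struct page *page)
        {
        #ifdef CONFIG_PAX_MEMORY_SANITIZE
                sanitize_highpage(page);  /* zero the page before it can be reused */
        #endif
                /* ... hand the page back to the buddy allocator ... */
        }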
63010diff -urNp linux-2.6.32.48/include/linux/i2c.h linux-2.6.32.48/include/linux/i2c.h
63011--- linux-2.6.32.48/include/linux/i2c.h 2011-11-08 19:02:43.000000000 -0500
63012+++ linux-2.6.32.48/include/linux/i2c.h 2011-11-15 19:59:43.000000000 -0500
63013@@ -325,6 +325,7 @@ struct i2c_algorithm {
63014 /* To determine what the adapter supports */
63015 u32 (*functionality) (struct i2c_adapter *);
63016 };
63017+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
63018
63019 /*
63020 * i2c_adapter is the structure used to identify a physical i2c bus along
63021diff -urNp linux-2.6.32.48/include/linux/i2o.h linux-2.6.32.48/include/linux/i2o.h
63022--- linux-2.6.32.48/include/linux/i2o.h 2011-11-08 19:02:43.000000000 -0500
63023+++ linux-2.6.32.48/include/linux/i2o.h 2011-11-15 19:59:43.000000000 -0500
63024@@ -564,7 +564,7 @@ struct i2o_controller {
63025 struct i2o_device *exec; /* Executive */
63026 #if BITS_PER_LONG == 64
63027 spinlock_t context_list_lock; /* lock for context_list */
63028- atomic_t context_list_counter; /* needed for unique contexts */
63029+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
63030 struct list_head context_list; /* list of context id's
63031 and pointers */
63032 #endif
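The atomic_t -> atomic_unchecked_t conversions in this and the later hunks (vm_stat, mce_bad_pages, the kgdb counters) mark counters whose wraparound is benign, so the PaX REFCOUNT overflow protection does not saturate them or treat an overflow as an exploit attempt. A minimal usage sketch, assuming the *_unchecked accessors (such as atomic_inc_return_unchecked()) that the PaX side of this patch provides:

        /* illustrative only: an ID counter that may legitimately wrap */
        static atomic_unchecked_t ctx_counter = ATOMIC_INIT(0);

        static u32 next_context_id(void)
        {
                /* unchecked: wraparound here is expected and harmless */
                return (u32)atomic_inc_return_unchecked(&ctx_counter);
        }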
63033diff -urNp linux-2.6.32.48/include/linux/init_task.h linux-2.6.32.48/include/linux/init_task.h
63034--- linux-2.6.32.48/include/linux/init_task.h 2011-11-08 19:02:43.000000000 -0500
63035+++ linux-2.6.32.48/include/linux/init_task.h 2011-11-15 19:59:43.000000000 -0500
63036@@ -83,6 +83,12 @@ extern struct group_info init_groups;
63037 #define INIT_IDS
63038 #endif
63039
63040+#ifdef CONFIG_X86
63041+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
63042+#else
63043+#define INIT_TASK_THREAD_INFO
63044+#endif
63045+
63046 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
63047 /*
63048 * Because of the reduced scope of CAP_SETPCAP when filesystem
63049@@ -156,6 +162,7 @@ extern struct cred init_cred;
63050 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
63051 .comm = "swapper", \
63052 .thread = INIT_THREAD, \
63053+ INIT_TASK_THREAD_INFO \
63054 .fs = &init_fs, \
63055 .files = &init_files, \
63056 .signal = &init_signals, \
63057diff -urNp linux-2.6.32.48/include/linux/intel-iommu.h linux-2.6.32.48/include/linux/intel-iommu.h
63058--- linux-2.6.32.48/include/linux/intel-iommu.h 2011-11-08 19:02:43.000000000 -0500
63059+++ linux-2.6.32.48/include/linux/intel-iommu.h 2011-11-15 19:59:43.000000000 -0500
63060@@ -296,7 +296,7 @@ struct iommu_flush {
63061 u8 fm, u64 type);
63062 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
63063 unsigned int size_order, u64 type);
63064-};
63065+} __no_const;
63066
63067 enum {
63068 SR_DMAR_FECTL_REG,
63069diff -urNp linux-2.6.32.48/include/linux/interrupt.h linux-2.6.32.48/include/linux/interrupt.h
63070--- linux-2.6.32.48/include/linux/interrupt.h 2011-11-08 19:02:43.000000000 -0500
63071+++ linux-2.6.32.48/include/linux/interrupt.h 2011-11-15 19:59:43.000000000 -0500
63072@@ -363,7 +363,7 @@ enum
63073 /* map softirq index to softirq name. update 'softirq_to_name' in
63074 * kernel/softirq.c when adding a new softirq.
63075 */
63076-extern char *softirq_to_name[NR_SOFTIRQS];
63077+extern const char * const softirq_to_name[NR_SOFTIRQS];
63078
63079 /* softirq mask and active fields moved to irq_cpustat_t in
63080 * asm/hardirq.h to get better cache usage. KAO
63081@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
63082
63083 struct softirq_action
63084 {
63085- void (*action)(struct softirq_action *);
63086+ void (*action)(void);
63087 };
63088
63089 asmlinkage void do_softirq(void);
63090 asmlinkage void __do_softirq(void);
63091-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
63092+extern void open_softirq(int nr, void (*action)(void));
63093 extern void softirq_init(void);
63094 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
63095 extern void raise_softirq_irqoff(unsigned int nr);
63096diff -urNp linux-2.6.32.48/include/linux/irq.h linux-2.6.32.48/include/linux/irq.h
63097--- linux-2.6.32.48/include/linux/irq.h 2011-11-08 19:02:43.000000000 -0500
63098+++ linux-2.6.32.48/include/linux/irq.h 2011-11-15 19:59:43.000000000 -0500
63099@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
63100 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
63101 bool boot)
63102 {
63103+#ifdef CONFIG_CPUMASK_OFFSTACK
63104 gfp_t gfp = GFP_ATOMIC;
63105
63106 if (boot)
63107 gfp = GFP_NOWAIT;
63108
63109-#ifdef CONFIG_CPUMASK_OFFSTACK
63110 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
63111 return false;
63112
63113diff -urNp linux-2.6.32.48/include/linux/kallsyms.h linux-2.6.32.48/include/linux/kallsyms.h
63114--- linux-2.6.32.48/include/linux/kallsyms.h 2011-11-08 19:02:43.000000000 -0500
63115+++ linux-2.6.32.48/include/linux/kallsyms.h 2011-11-15 19:59:43.000000000 -0500
63116@@ -15,7 +15,8 @@
63117
63118 struct module;
63119
63120-#ifdef CONFIG_KALLSYMS
63121+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
63122+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63123 /* Lookup the address for a symbol. Returns 0 if not found. */
63124 unsigned long kallsyms_lookup_name(const char *name);
63125
63126@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
63127 /* Stupid that this does nothing, but I didn't create this mess. */
63128 #define __print_symbol(fmt, addr)
63129 #endif /*CONFIG_KALLSYMS*/
63130+#else /* when included by kallsyms.c, vsnprintf.c, or
63131+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
63132+extern void __print_symbol(const char *fmt, unsigned long address);
63133+extern int sprint_symbol(char *buffer, unsigned long address);
63134+const char *kallsyms_lookup(unsigned long addr,
63135+ unsigned long *symbolsize,
63136+ unsigned long *offset,
63137+ char **modname, char *namebuf);
63138+#endif
63139
63140 /* This macro allows us to keep printk typechecking */
63141 static void __check_printsym_format(const char *fmt, ...)
63142diff -urNp linux-2.6.32.48/include/linux/kgdb.h linux-2.6.32.48/include/linux/kgdb.h
63143--- linux-2.6.32.48/include/linux/kgdb.h 2011-11-08 19:02:43.000000000 -0500
63144+++ linux-2.6.32.48/include/linux/kgdb.h 2011-11-15 19:59:43.000000000 -0500
63145@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
63146
63147 extern int kgdb_connected;
63148
63149-extern atomic_t kgdb_setting_breakpoint;
63150-extern atomic_t kgdb_cpu_doing_single_step;
63151+extern atomic_unchecked_t kgdb_setting_breakpoint;
63152+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
63153
63154 extern struct task_struct *kgdb_usethread;
63155 extern struct task_struct *kgdb_contthread;
63156@@ -235,7 +235,7 @@ struct kgdb_arch {
63157 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
63158 void (*remove_all_hw_break)(void);
63159 void (*correct_hw_break)(void);
63160-};
63161+} __do_const;
63162
63163 /**
63164 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
63165@@ -257,14 +257,14 @@ struct kgdb_io {
63166 int (*init) (void);
63167 void (*pre_exception) (void);
63168 void (*post_exception) (void);
63169-};
63170+} __do_const;
63171
63172-extern struct kgdb_arch arch_kgdb_ops;
63173+extern const struct kgdb_arch arch_kgdb_ops;
63174
63175 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
63176
63177-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
63178-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
63179+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
63180+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
63181
63182 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
63183 extern int kgdb_mem2hex(char *mem, char *buf, int count);
63184diff -urNp linux-2.6.32.48/include/linux/kmod.h linux-2.6.32.48/include/linux/kmod.h
63185--- linux-2.6.32.48/include/linux/kmod.h 2011-11-08 19:02:43.000000000 -0500
63186+++ linux-2.6.32.48/include/linux/kmod.h 2011-11-15 19:59:43.000000000 -0500
63187@@ -31,6 +31,8 @@
63188 * usually useless though. */
63189 extern int __request_module(bool wait, const char *name, ...) \
63190 __attribute__((format(printf, 2, 3)));
63191+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
63192+ __attribute__((format(printf, 3, 4)));
63193 #define request_module(mod...) __request_module(true, mod)
63194 #define request_module_nowait(mod...) __request_module(false, mod)
63195 #define try_then_request_module(x, mod...) \
63196diff -urNp linux-2.6.32.48/include/linux/kobject.h linux-2.6.32.48/include/linux/kobject.h
63197--- linux-2.6.32.48/include/linux/kobject.h 2011-11-08 19:02:43.000000000 -0500
63198+++ linux-2.6.32.48/include/linux/kobject.h 2011-11-15 19:59:43.000000000 -0500
63199@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
63200
63201 struct kobj_type {
63202 void (*release)(struct kobject *kobj);
63203- struct sysfs_ops *sysfs_ops;
63204+ const struct sysfs_ops *sysfs_ops;
63205 struct attribute **default_attrs;
63206 };
63207
63208@@ -118,9 +118,9 @@ struct kobj_uevent_env {
63209 };
63210
63211 struct kset_uevent_ops {
63212- int (*filter)(struct kset *kset, struct kobject *kobj);
63213- const char *(*name)(struct kset *kset, struct kobject *kobj);
63214- int (*uevent)(struct kset *kset, struct kobject *kobj,
63215+ int (* const filter)(struct kset *kset, struct kobject *kobj);
63216+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
63217+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
63218 struct kobj_uevent_env *env);
63219 };
63220
63221@@ -132,7 +132,7 @@ struct kobj_attribute {
63222 const char *buf, size_t count);
63223 };
63224
63225-extern struct sysfs_ops kobj_sysfs_ops;
63226+extern const struct sysfs_ops kobj_sysfs_ops;
63227
63228 /**
63229 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
63230@@ -155,14 +155,14 @@ struct kset {
63231 struct list_head list;
63232 spinlock_t list_lock;
63233 struct kobject kobj;
63234- struct kset_uevent_ops *uevent_ops;
63235+ const struct kset_uevent_ops *uevent_ops;
63236 };
63237
63238 extern void kset_init(struct kset *kset);
63239 extern int __must_check kset_register(struct kset *kset);
63240 extern void kset_unregister(struct kset *kset);
63241 extern struct kset * __must_check kset_create_and_add(const char *name,
63242- struct kset_uevent_ops *u,
63243+ const struct kset_uevent_ops *u,
63244 struct kobject *parent_kobj);
63245
63246 static inline struct kset *to_kset(struct kobject *kobj)
63247diff -urNp linux-2.6.32.48/include/linux/kvm_host.h linux-2.6.32.48/include/linux/kvm_host.h
63248--- linux-2.6.32.48/include/linux/kvm_host.h 2011-11-08 19:02:43.000000000 -0500
63249+++ linux-2.6.32.48/include/linux/kvm_host.h 2011-11-15 19:59:43.000000000 -0500
63250@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
63251 void vcpu_load(struct kvm_vcpu *vcpu);
63252 void vcpu_put(struct kvm_vcpu *vcpu);
63253
63254-int kvm_init(void *opaque, unsigned int vcpu_size,
63255+int kvm_init(const void *opaque, unsigned int vcpu_size,
63256 struct module *module);
63257 void kvm_exit(void);
63258
63259@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
63260 struct kvm_guest_debug *dbg);
63261 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
63262
63263-int kvm_arch_init(void *opaque);
63264+int kvm_arch_init(const void *opaque);
63265 void kvm_arch_exit(void);
63266
63267 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
63268diff -urNp linux-2.6.32.48/include/linux/libata.h linux-2.6.32.48/include/linux/libata.h
63269--- linux-2.6.32.48/include/linux/libata.h 2011-11-08 19:02:43.000000000 -0500
63270+++ linux-2.6.32.48/include/linux/libata.h 2011-11-15 19:59:43.000000000 -0500
63271@@ -525,11 +525,11 @@ struct ata_ioports {
63272
63273 struct ata_host {
63274 spinlock_t lock;
63275- struct device *dev;
63276+ struct device *dev;
63277 void __iomem * const *iomap;
63278 unsigned int n_ports;
63279 void *private_data;
63280- struct ata_port_operations *ops;
63281+ const struct ata_port_operations *ops;
63282 unsigned long flags;
63283 #ifdef CONFIG_ATA_ACPI
63284 acpi_handle acpi_handle;
63285@@ -710,7 +710,7 @@ struct ata_link {
63286
63287 struct ata_port {
63288 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
63289- struct ata_port_operations *ops;
63290+ const struct ata_port_operations *ops;
63291 spinlock_t *lock;
63292 /* Flags owned by the EH context. Only EH should touch these once the
63293 port is active */
63294@@ -884,7 +884,7 @@ struct ata_port_operations {
63295 * fields must be pointers.
63296 */
63297 const struct ata_port_operations *inherits;
63298-};
63299+} __do_const;
63300
63301 struct ata_port_info {
63302 unsigned long flags;
63303@@ -892,7 +892,7 @@ struct ata_port_info {
63304 unsigned long pio_mask;
63305 unsigned long mwdma_mask;
63306 unsigned long udma_mask;
63307- struct ata_port_operations *port_ops;
63308+ const struct ata_port_operations *port_ops;
63309 void *private_data;
63310 };
63311
63312@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
63313 extern const unsigned long sata_deb_timing_hotplug[];
63314 extern const unsigned long sata_deb_timing_long[];
63315
63316-extern struct ata_port_operations ata_dummy_port_ops;
63317+extern const struct ata_port_operations ata_dummy_port_ops;
63318 extern const struct ata_port_info ata_dummy_port_info;
63319
63320 static inline const unsigned long *
63321@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
63322 struct scsi_host_template *sht);
63323 extern void ata_host_detach(struct ata_host *host);
63324 extern void ata_host_init(struct ata_host *, struct device *,
63325- unsigned long, struct ata_port_operations *);
63326+ unsigned long, const struct ata_port_operations *);
63327 extern int ata_scsi_detect(struct scsi_host_template *sht);
63328 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
63329 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
63330diff -urNp linux-2.6.32.48/include/linux/lockd/bind.h linux-2.6.32.48/include/linux/lockd/bind.h
63331--- linux-2.6.32.48/include/linux/lockd/bind.h 2011-11-08 19:02:43.000000000 -0500
63332+++ linux-2.6.32.48/include/linux/lockd/bind.h 2011-11-15 19:59:43.000000000 -0500
63333@@ -23,13 +23,13 @@ struct svc_rqst;
63334 * This is the set of functions for lockd->nfsd communication
63335 */
63336 struct nlmsvc_binding {
63337- __be32 (*fopen)(struct svc_rqst *,
63338+ __be32 (* const fopen)(struct svc_rqst *,
63339 struct nfs_fh *,
63340 struct file **);
63341- void (*fclose)(struct file *);
63342+ void (* const fclose)(struct file *);
63343 };
63344
63345-extern struct nlmsvc_binding * nlmsvc_ops;
63346+extern const struct nlmsvc_binding * nlmsvc_ops;
63347
63348 /*
63349 * Similar to nfs_client_initdata, but without the NFS-specific
63350diff -urNp linux-2.6.32.48/include/linux/mca.h linux-2.6.32.48/include/linux/mca.h
63351--- linux-2.6.32.48/include/linux/mca.h 2011-11-08 19:02:43.000000000 -0500
63352+++ linux-2.6.32.48/include/linux/mca.h 2011-11-15 19:59:43.000000000 -0500
63353@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
63354 int region);
63355 void * (*mca_transform_memory)(struct mca_device *,
63356 void *memory);
63357-};
63358+} __no_const;
63359
63360 struct mca_bus {
63361 u64 default_dma_mask;
63362diff -urNp linux-2.6.32.48/include/linux/memory.h linux-2.6.32.48/include/linux/memory.h
63363--- linux-2.6.32.48/include/linux/memory.h 2011-11-08 19:02:43.000000000 -0500
63364+++ linux-2.6.32.48/include/linux/memory.h 2011-11-15 19:59:43.000000000 -0500
63365@@ -108,7 +108,7 @@ struct memory_accessor {
63366 size_t count);
63367 ssize_t (*write)(struct memory_accessor *, const char *buf,
63368 off_t offset, size_t count);
63369-};
63370+} __no_const;
63371
63372 /*
63373 * Kernel text modification mutex, used for code patching. Users of this lock
63374diff -urNp linux-2.6.32.48/include/linux/mm.h linux-2.6.32.48/include/linux/mm.h
63375--- linux-2.6.32.48/include/linux/mm.h 2011-11-08 19:02:43.000000000 -0500
63376+++ linux-2.6.32.48/include/linux/mm.h 2011-11-15 19:59:43.000000000 -0500
63377@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
63378
63379 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
63380 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
63381+
63382+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63383+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
63384+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
63385+#else
63386 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
63387+#endif
63388+
63389 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
63390 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
63391
63392@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
63393 int set_page_dirty_lock(struct page *page);
63394 int clear_page_dirty_for_io(struct page *page);
63395
63396-/* Is the vma a continuation of the stack vma above it? */
63397-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
63398-{
63399- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
63400-}
63401-
63402 extern unsigned long move_page_tables(struct vm_area_struct *vma,
63403 unsigned long old_addr, struct vm_area_struct *new_vma,
63404 unsigned long new_addr, unsigned long len);
63405@@ -890,6 +891,8 @@ struct shrinker {
63406 extern void register_shrinker(struct shrinker *);
63407 extern void unregister_shrinker(struct shrinker *);
63408
63409+pgprot_t vm_get_page_prot(unsigned long vm_flags);
63410+
63411 int vma_wants_writenotify(struct vm_area_struct *vma);
63412
63413 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
63414@@ -1162,6 +1165,7 @@ out:
63415 }
63416
63417 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63418+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63419
63420 extern unsigned long do_brk(unsigned long, unsigned long);
63421
63422@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
63423 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63424 struct vm_area_struct **pprev);
63425
63426+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
63427+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
63428+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63429+
63430 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63431 NULL if none. Assume start_addr < end_addr. */
63432 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
63433@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
63434 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
63435 }
63436
63437-pgprot_t vm_get_page_prot(unsigned long vm_flags);
63438 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63439 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63440 unsigned long pfn, unsigned long size, pgprot_t);
63441@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
63442 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
63443 extern int sysctl_memory_failure_early_kill;
63444 extern int sysctl_memory_failure_recovery;
63445-extern atomic_long_t mce_bad_pages;
63446+extern atomic_long_unchecked_t mce_bad_pages;
63447+
63448+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63449+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63450+#else
63451+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63452+#endif
63453
63454 #endif /* __KERNEL__ */
63455 #endif /* _LINUX_MM_H */
63456diff -urNp linux-2.6.32.48/include/linux/mm_types.h linux-2.6.32.48/include/linux/mm_types.h
63457--- linux-2.6.32.48/include/linux/mm_types.h 2011-11-08 19:02:43.000000000 -0500
63458+++ linux-2.6.32.48/include/linux/mm_types.h 2011-11-15 19:59:43.000000000 -0500
63459@@ -186,6 +186,8 @@ struct vm_area_struct {
63460 #ifdef CONFIG_NUMA
63461 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63462 #endif
63463+
63464+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63465 };
63466
63467 struct core_thread {
63468@@ -287,6 +289,24 @@ struct mm_struct {
63469 #ifdef CONFIG_MMU_NOTIFIER
63470 struct mmu_notifier_mm *mmu_notifier_mm;
63471 #endif
63472+
63473+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63474+ unsigned long pax_flags;
63475+#endif
63476+
63477+#ifdef CONFIG_PAX_DLRESOLVE
63478+ unsigned long call_dl_resolve;
63479+#endif
63480+
63481+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63482+ unsigned long call_syscall;
63483+#endif
63484+
63485+#ifdef CONFIG_PAX_ASLR
63486+ unsigned long delta_mmap; /* randomized offset */
63487+ unsigned long delta_stack; /* randomized offset */
63488+#endif
63489+
63490 };
63491
63492 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
63493diff -urNp linux-2.6.32.48/include/linux/mmu_notifier.h linux-2.6.32.48/include/linux/mmu_notifier.h
63494--- linux-2.6.32.48/include/linux/mmu_notifier.h 2011-11-08 19:02:43.000000000 -0500
63495+++ linux-2.6.32.48/include/linux/mmu_notifier.h 2011-11-15 19:59:43.000000000 -0500
63496@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
63497 */
63498 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63499 ({ \
63500- pte_t __pte; \
63501+ pte_t ___pte; \
63502 struct vm_area_struct *___vma = __vma; \
63503 unsigned long ___address = __address; \
63504- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63505+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63506 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63507- __pte; \
63508+ ___pte; \
63509 })
63510
63511 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
63512diff -urNp linux-2.6.32.48/include/linux/mmzone.h linux-2.6.32.48/include/linux/mmzone.h
63513--- linux-2.6.32.48/include/linux/mmzone.h 2011-11-08 19:02:43.000000000 -0500
63514+++ linux-2.6.32.48/include/linux/mmzone.h 2011-11-15 19:59:43.000000000 -0500
63515@@ -350,7 +350,7 @@ struct zone {
63516 unsigned long flags; /* zone flags, see below */
63517
63518 /* Zone statistics */
63519- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63520+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63521
63522 /*
63523 * prev_priority holds the scanning priority for this zone. It is
63524diff -urNp linux-2.6.32.48/include/linux/mod_devicetable.h linux-2.6.32.48/include/linux/mod_devicetable.h
63525--- linux-2.6.32.48/include/linux/mod_devicetable.h 2011-11-08 19:02:43.000000000 -0500
63526+++ linux-2.6.32.48/include/linux/mod_devicetable.h 2011-11-15 19:59:43.000000000 -0500
63527@@ -12,7 +12,7 @@
63528 typedef unsigned long kernel_ulong_t;
63529 #endif
63530
63531-#define PCI_ANY_ID (~0)
63532+#define PCI_ANY_ID ((__u16)~0)
63533
63534 struct pci_device_id {
63535 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63536@@ -131,7 +131,7 @@ struct usb_device_id {
63537 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63538 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63539
63540-#define HID_ANY_ID (~0)
63541+#define HID_ANY_ID (~0U)
63542
63543 struct hid_device_id {
63544 __u16 bus;
63545diff -urNp linux-2.6.32.48/include/linux/module.h linux-2.6.32.48/include/linux/module.h
63546--- linux-2.6.32.48/include/linux/module.h 2011-11-08 19:02:43.000000000 -0500
63547+++ linux-2.6.32.48/include/linux/module.h 2011-11-15 19:59:43.000000000 -0500
63548@@ -16,6 +16,7 @@
63549 #include <linux/kobject.h>
63550 #include <linux/moduleparam.h>
63551 #include <linux/tracepoint.h>
63552+#include <linux/fs.h>
63553
63554 #include <asm/local.h>
63555 #include <asm/module.h>
63556@@ -287,16 +288,16 @@ struct module
63557 int (*init)(void);
63558
63559 /* If this is non-NULL, vfree after init() returns */
63560- void *module_init;
63561+ void *module_init_rx, *module_init_rw;
63562
63563 /* Here is the actual code + data, vfree'd on unload. */
63564- void *module_core;
63565+ void *module_core_rx, *module_core_rw;
63566
63567 /* Here are the sizes of the init and core sections */
63568- unsigned int init_size, core_size;
63569+ unsigned int init_size_rw, core_size_rw;
63570
63571 /* The size of the executable code in each section. */
63572- unsigned int init_text_size, core_text_size;
63573+ unsigned int init_size_rx, core_size_rx;
63574
63575 /* Arch-specific module values */
63576 struct mod_arch_specific arch;
63577@@ -345,6 +346,10 @@ struct module
63578 #ifdef CONFIG_EVENT_TRACING
63579 struct ftrace_event_call *trace_events;
63580 unsigned int num_trace_events;
63581+ struct file_operations trace_id;
63582+ struct file_operations trace_enable;
63583+ struct file_operations trace_format;
63584+ struct file_operations trace_filter;
63585 #endif
63586 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63587 unsigned long *ftrace_callsites;
63588@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
63589 bool is_module_address(unsigned long addr);
63590 bool is_module_text_address(unsigned long addr);
63591
63592+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63593+{
63594+
63595+#ifdef CONFIG_PAX_KERNEXEC
63596+ if (ktla_ktva(addr) >= (unsigned long)start &&
63597+ ktla_ktva(addr) < (unsigned long)start + size)
63598+ return 1;
63599+#endif
63600+
63601+ return ((void *)addr >= start && (void *)addr < start + size);
63602+}
63603+
63604+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63605+{
63606+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63607+}
63608+
63609+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63610+{
63611+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63612+}
63613+
63614+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63615+{
63616+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63617+}
63618+
63619+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63620+{
63621+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63622+}
63623+
63624 static inline int within_module_core(unsigned long addr, struct module *mod)
63625 {
63626- return (unsigned long)mod->module_core <= addr &&
63627- addr < (unsigned long)mod->module_core + mod->core_size;
63628+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63629 }
63630
63631 static inline int within_module_init(unsigned long addr, struct module *mod)
63632 {
63633- return (unsigned long)mod->module_init <= addr &&
63634- addr < (unsigned long)mod->module_init + mod->init_size;
63635+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63636 }
63637
63638 /* Search for module by name: must hold module_mutex. */
63639diff -urNp linux-2.6.32.48/include/linux/moduleloader.h linux-2.6.32.48/include/linux/moduleloader.h
63640--- linux-2.6.32.48/include/linux/moduleloader.h 2011-11-08 19:02:43.000000000 -0500
63641+++ linux-2.6.32.48/include/linux/moduleloader.h 2011-11-15 19:59:43.000000000 -0500
63642@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
63643 sections. Returns NULL on failure. */
63644 void *module_alloc(unsigned long size);
63645
63646+#ifdef CONFIG_PAX_KERNEXEC
63647+void *module_alloc_exec(unsigned long size);
63648+#else
63649+#define module_alloc_exec(x) module_alloc(x)
63650+#endif
63651+
63652 /* Free memory returned from module_alloc. */
63653 void module_free(struct module *mod, void *module_region);
63654
63655+#ifdef CONFIG_PAX_KERNEXEC
63656+void module_free_exec(struct module *mod, void *module_region);
63657+#else
63658+#define module_free_exec(x, y) module_free((x), (y))
63659+#endif
63660+
63661 /* Apply the given relocation to the (simplified) ELF. Return -error
63662 or 0. */
63663 int apply_relocate(Elf_Shdr *sechdrs,
63664diff -urNp linux-2.6.32.48/include/linux/moduleparam.h linux-2.6.32.48/include/linux/moduleparam.h
63665--- linux-2.6.32.48/include/linux/moduleparam.h 2011-11-08 19:02:43.000000000 -0500
63666+++ linux-2.6.32.48/include/linux/moduleparam.h 2011-11-15 19:59:43.000000000 -0500
63667@@ -132,7 +132,7 @@ struct kparam_array
63668
63669 /* Actually copy string: maxlen param is usually sizeof(string). */
63670 #define module_param_string(name, string, len, perm) \
63671- static const struct kparam_string __param_string_##name \
63672+ static const struct kparam_string __param_string_##name __used \
63673 = { len, string }; \
63674 __module_param_call(MODULE_PARAM_PREFIX, name, \
63675 param_set_copystring, param_get_string, \
63676@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
63677
63678 /* Comma-separated array: *nump is set to number they actually specified. */
63679 #define module_param_array_named(name, array, type, nump, perm) \
63680- static const struct kparam_array __param_arr_##name \
63681+ static const struct kparam_array __param_arr_##name __used \
63682 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
63683 sizeof(array[0]), array }; \
63684 __module_param_call(MODULE_PARAM_PREFIX, name, \
63685diff -urNp linux-2.6.32.48/include/linux/mutex.h linux-2.6.32.48/include/linux/mutex.h
63686--- linux-2.6.32.48/include/linux/mutex.h 2011-11-08 19:02:43.000000000 -0500
63687+++ linux-2.6.32.48/include/linux/mutex.h 2011-11-15 19:59:43.000000000 -0500
63688@@ -51,7 +51,7 @@ struct mutex {
63689 spinlock_t wait_lock;
63690 struct list_head wait_list;
63691 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
63692- struct thread_info *owner;
63693+ struct task_struct *owner;
63694 #endif
63695 #ifdef CONFIG_DEBUG_MUTEXES
63696 const char *name;
63697diff -urNp linux-2.6.32.48/include/linux/namei.h linux-2.6.32.48/include/linux/namei.h
63698--- linux-2.6.32.48/include/linux/namei.h 2011-11-08 19:02:43.000000000 -0500
63699+++ linux-2.6.32.48/include/linux/namei.h 2011-11-15 19:59:43.000000000 -0500
63700@@ -22,7 +22,7 @@ struct nameidata {
63701 unsigned int flags;
63702 int last_type;
63703 unsigned depth;
63704- char *saved_names[MAX_NESTED_LINKS + 1];
63705+ const char *saved_names[MAX_NESTED_LINKS + 1];
63706
63707 /* Intent data */
63708 union {
63709@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
63710 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63711 extern void unlock_rename(struct dentry *, struct dentry *);
63712
63713-static inline void nd_set_link(struct nameidata *nd, char *path)
63714+static inline void nd_set_link(struct nameidata *nd, const char *path)
63715 {
63716 nd->saved_names[nd->depth] = path;
63717 }
63718
63719-static inline char *nd_get_link(struct nameidata *nd)
63720+static inline const char *nd_get_link(const struct nameidata *nd)
63721 {
63722 return nd->saved_names[nd->depth];
63723 }
63724diff -urNp linux-2.6.32.48/include/linux/netdevice.h linux-2.6.32.48/include/linux/netdevice.h
63725--- linux-2.6.32.48/include/linux/netdevice.h 2011-11-08 19:02:43.000000000 -0500
63726+++ linux-2.6.32.48/include/linux/netdevice.h 2011-11-15 19:59:43.000000000 -0500
63727@@ -637,6 +637,7 @@ struct net_device_ops {
63728 u16 xid);
63729 #endif
63730 };
63731+typedef struct net_device_ops __no_const net_device_ops_no_const;
63732
63733 /*
63734 * The DEVICE structure.
63735diff -urNp linux-2.6.32.48/include/linux/netfilter/xt_gradm.h linux-2.6.32.48/include/linux/netfilter/xt_gradm.h
63736--- linux-2.6.32.48/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
63737+++ linux-2.6.32.48/include/linux/netfilter/xt_gradm.h 2011-11-15 19:59:43.000000000 -0500
63738@@ -0,0 +1,9 @@
63739+#ifndef _LINUX_NETFILTER_XT_GRADM_H
63740+#define _LINUX_NETFILTER_XT_GRADM_H 1
63741+
63742+struct xt_gradm_mtinfo {
63743+ __u16 flags;
63744+ __u16 invflags;
63745+};
63746+
63747+#endif
63748diff -urNp linux-2.6.32.48/include/linux/nodemask.h linux-2.6.32.48/include/linux/nodemask.h
63749--- linux-2.6.32.48/include/linux/nodemask.h 2011-11-08 19:02:43.000000000 -0500
63750+++ linux-2.6.32.48/include/linux/nodemask.h 2011-11-15 19:59:43.000000000 -0500
63751@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
63752
63753 #define any_online_node(mask) \
63754 ({ \
63755- int node; \
63756- for_each_node_mask(node, (mask)) \
63757- if (node_online(node)) \
63758+ int __node; \
63759+ for_each_node_mask(__node, (mask)) \
63760+ if (node_online(__node)) \
63761 break; \
63762- node; \
63763+ __node; \
63764 })
63765
63766 #define num_online_nodes() num_node_state(N_ONLINE)
63767diff -urNp linux-2.6.32.48/include/linux/oprofile.h linux-2.6.32.48/include/linux/oprofile.h
63768--- linux-2.6.32.48/include/linux/oprofile.h 2011-11-08 19:02:43.000000000 -0500
63769+++ linux-2.6.32.48/include/linux/oprofile.h 2011-11-15 19:59:43.000000000 -0500
63770@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
63771 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63772 char const * name, ulong * val);
63773
63774-/** Create a file for read-only access to an atomic_t. */
63775+/** Create a file for read-only access to an atomic_unchecked_t. */
63776 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63777- char const * name, atomic_t * val);
63778+ char const * name, atomic_unchecked_t * val);
63779
63780 /** create a directory */
63781 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63782diff -urNp linux-2.6.32.48/include/linux/pagemap.h linux-2.6.32.48/include/linux/pagemap.h
63783--- linux-2.6.32.48/include/linux/pagemap.h 2011-11-08 19:02:43.000000000 -0500
63784+++ linux-2.6.32.48/include/linux/pagemap.h 2011-11-15 19:59:43.000000000 -0500
63785@@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
63786 if (((unsigned long)uaddr & PAGE_MASK) !=
63787 ((unsigned long)end & PAGE_MASK))
63788 ret = __get_user(c, end);
63789+ (void)c;
63790 }
63791 return ret;
63792 }
63793diff -urNp linux-2.6.32.48/include/linux/perf_event.h linux-2.6.32.48/include/linux/perf_event.h
63794--- linux-2.6.32.48/include/linux/perf_event.h 2011-11-08 19:02:43.000000000 -0500
63795+++ linux-2.6.32.48/include/linux/perf_event.h 2011-11-15 19:59:43.000000000 -0500
63796@@ -476,7 +476,7 @@ struct hw_perf_event {
63797 struct hrtimer hrtimer;
63798 };
63799 };
63800- atomic64_t prev_count;
63801+ atomic64_unchecked_t prev_count;
63802 u64 sample_period;
63803 u64 last_period;
63804 atomic64_t period_left;
63805@@ -557,7 +557,7 @@ struct perf_event {
63806 const struct pmu *pmu;
63807
63808 enum perf_event_active_state state;
63809- atomic64_t count;
63810+ atomic64_unchecked_t count;
63811
63812 /*
63813 * These are the total time in nanoseconds that the event
63814@@ -595,8 +595,8 @@ struct perf_event {
63815 * These accumulate total time (in nanoseconds) that children
63816 * events have been enabled and running, respectively.
63817 */
63818- atomic64_t child_total_time_enabled;
63819- atomic64_t child_total_time_running;
63820+ atomic64_unchecked_t child_total_time_enabled;
63821+ atomic64_unchecked_t child_total_time_running;
63822
63823 /*
63824 * Protect attach/detach and child_list:
63825diff -urNp linux-2.6.32.48/include/linux/pipe_fs_i.h linux-2.6.32.48/include/linux/pipe_fs_i.h
63826--- linux-2.6.32.48/include/linux/pipe_fs_i.h 2011-11-08 19:02:43.000000000 -0500
63827+++ linux-2.6.32.48/include/linux/pipe_fs_i.h 2011-11-15 19:59:43.000000000 -0500
63828@@ -46,9 +46,9 @@ struct pipe_inode_info {
63829 wait_queue_head_t wait;
63830 unsigned int nrbufs, curbuf;
63831 struct page *tmp_page;
63832- unsigned int readers;
63833- unsigned int writers;
63834- unsigned int waiting_writers;
63835+ atomic_t readers;
63836+ atomic_t writers;
63837+ atomic_t waiting_writers;
63838 unsigned int r_counter;
63839 unsigned int w_counter;
63840 struct fasync_struct *fasync_readers;
63841diff -urNp linux-2.6.32.48/include/linux/poison.h linux-2.6.32.48/include/linux/poison.h
63842--- linux-2.6.32.48/include/linux/poison.h 2011-11-08 19:02:43.000000000 -0500
63843+++ linux-2.6.32.48/include/linux/poison.h 2011-11-15 19:59:43.000000000 -0500
63844@@ -19,8 +19,8 @@
63845 * under normal circumstances, used to verify that nobody uses
63846 * non-initialized list entries.
63847 */
63848-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63849-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63850+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63851+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63852
63853 /********** include/linux/timer.h **********/
63854 /*
63855diff -urNp linux-2.6.32.48/include/linux/posix-timers.h linux-2.6.32.48/include/linux/posix-timers.h
63856--- linux-2.6.32.48/include/linux/posix-timers.h 2011-11-08 19:02:43.000000000 -0500
63857+++ linux-2.6.32.48/include/linux/posix-timers.h 2011-11-15 19:59:43.000000000 -0500
63858@@ -67,7 +67,7 @@ struct k_itimer {
63859 };
63860
63861 struct k_clock {
63862- int res; /* in nanoseconds */
63863+ const int res; /* in nanoseconds */
63864 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
63865 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
63866 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
63867diff -urNp linux-2.6.32.48/include/linux/preempt.h linux-2.6.32.48/include/linux/preempt.h
63868--- linux-2.6.32.48/include/linux/preempt.h 2011-11-08 19:02:43.000000000 -0500
63869+++ linux-2.6.32.48/include/linux/preempt.h 2011-11-15 19:59:43.000000000 -0500
63870@@ -110,7 +110,7 @@ struct preempt_ops {
63871 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63872 void (*sched_out)(struct preempt_notifier *notifier,
63873 struct task_struct *next);
63874-};
63875+} __no_const;
63876
63877 /**
63878 * preempt_notifier - key for installing preemption notifiers
63879diff -urNp linux-2.6.32.48/include/linux/proc_fs.h linux-2.6.32.48/include/linux/proc_fs.h
63880--- linux-2.6.32.48/include/linux/proc_fs.h 2011-11-08 19:02:43.000000000 -0500
63881+++ linux-2.6.32.48/include/linux/proc_fs.h 2011-11-15 19:59:43.000000000 -0500
63882@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
63883 return proc_create_data(name, mode, parent, proc_fops, NULL);
63884 }
63885
63886+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
63887+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63888+{
63889+#ifdef CONFIG_GRKERNSEC_PROC_USER
63890+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63891+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63892+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63893+#else
63894+ return proc_create_data(name, mode, parent, proc_fops, NULL);
63895+#endif
63896+}
63897+
63898+
63899 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63900 mode_t mode, struct proc_dir_entry *base,
63901 read_proc_t *read_proc, void * data)
63902@@ -256,7 +269,7 @@ union proc_op {
63903 int (*proc_show)(struct seq_file *m,
63904 struct pid_namespace *ns, struct pid *pid,
63905 struct task_struct *task);
63906-};
63907+} __no_const;
63908
63909 struct ctl_table_header;
63910 struct ctl_table;
63911diff -urNp linux-2.6.32.48/include/linux/ptrace.h linux-2.6.32.48/include/linux/ptrace.h
63912--- linux-2.6.32.48/include/linux/ptrace.h 2011-11-08 19:02:43.000000000 -0500
63913+++ linux-2.6.32.48/include/linux/ptrace.h 2011-11-15 19:59:43.000000000 -0500
63914@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
63915 extern void exit_ptrace(struct task_struct *tracer);
63916 #define PTRACE_MODE_READ 1
63917 #define PTRACE_MODE_ATTACH 2
63918-/* Returns 0 on success, -errno on denial. */
63919-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
63920 /* Returns true on success, false on denial. */
63921 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
63922+/* Returns true on success, false on denial. */
63923+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
63924
63925 static inline int ptrace_reparented(struct task_struct *child)
63926 {
63927diff -urNp linux-2.6.32.48/include/linux/random.h linux-2.6.32.48/include/linux/random.h
63928--- linux-2.6.32.48/include/linux/random.h 2011-11-08 19:02:43.000000000 -0500
63929+++ linux-2.6.32.48/include/linux/random.h 2011-11-15 19:59:43.000000000 -0500
63930@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
63931 u32 random32(void);
63932 void srandom32(u32 seed);
63933
63934+static inline unsigned long pax_get_random_long(void)
63935+{
63936+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63937+}
63938+
63939 #endif /* __KERNEL___ */
63940
63941 #endif /* _LINUX_RANDOM_H */
63942diff -urNp linux-2.6.32.48/include/linux/reboot.h linux-2.6.32.48/include/linux/reboot.h
63943--- linux-2.6.32.48/include/linux/reboot.h 2011-11-08 19:02:43.000000000 -0500
63944+++ linux-2.6.32.48/include/linux/reboot.h 2011-11-15 19:59:43.000000000 -0500
63945@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
63946 * Architecture-specific implementations of sys_reboot commands.
63947 */
63948
63949-extern void machine_restart(char *cmd);
63950-extern void machine_halt(void);
63951-extern void machine_power_off(void);
63952+extern void machine_restart(char *cmd) __noreturn;
63953+extern void machine_halt(void) __noreturn;
63954+extern void machine_power_off(void) __noreturn;
63955
63956 extern void machine_shutdown(void);
63957 struct pt_regs;
63958@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
63959 */
63960
63961 extern void kernel_restart_prepare(char *cmd);
63962-extern void kernel_restart(char *cmd);
63963-extern void kernel_halt(void);
63964-extern void kernel_power_off(void);
63965+extern void kernel_restart(char *cmd) __noreturn;
63966+extern void kernel_halt(void) __noreturn;
63967+extern void kernel_power_off(void) __noreturn;
63968
63969 void ctrl_alt_del(void);
63970
63971@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
63972 * Emergency restart, callable from an interrupt handler.
63973 */
63974
63975-extern void emergency_restart(void);
63976+extern void emergency_restart(void) __noreturn;
63977 #include <asm/emergency-restart.h>
63978
63979 #endif
63980diff -urNp linux-2.6.32.48/include/linux/reiserfs_fs.h linux-2.6.32.48/include/linux/reiserfs_fs.h
63981--- linux-2.6.32.48/include/linux/reiserfs_fs.h 2011-11-08 19:02:43.000000000 -0500
63982+++ linux-2.6.32.48/include/linux/reiserfs_fs.h 2011-11-15 19:59:43.000000000 -0500
63983@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
63984 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63985
63986 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63987-#define get_generation(s) atomic_read (&fs_generation(s))
63988+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63989 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63990 #define __fs_changed(gen,s) (gen != get_generation (s))
63991 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
63992@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
63993 */
63994
63995 struct item_operations {
63996- int (*bytes_number) (struct item_head * ih, int block_size);
63997- void (*decrement_key) (struct cpu_key *);
63998- int (*is_left_mergeable) (struct reiserfs_key * ih,
63999+ int (* const bytes_number) (struct item_head * ih, int block_size);
64000+ void (* const decrement_key) (struct cpu_key *);
64001+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
64002 unsigned long bsize);
64003- void (*print_item) (struct item_head *, char *item);
64004- void (*check_item) (struct item_head *, char *item);
64005+ void (* const print_item) (struct item_head *, char *item);
64006+ void (* const check_item) (struct item_head *, char *item);
64007
64008- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
64009+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
64010 int is_affected, int insert_size);
64011- int (*check_left) (struct virtual_item * vi, int free,
64012+ int (* const check_left) (struct virtual_item * vi, int free,
64013 int start_skip, int end_skip);
64014- int (*check_right) (struct virtual_item * vi, int free);
64015- int (*part_size) (struct virtual_item * vi, int from, int to);
64016- int (*unit_num) (struct virtual_item * vi);
64017- void (*print_vi) (struct virtual_item * vi);
64018+ int (* const check_right) (struct virtual_item * vi, int free);
64019+ int (* const part_size) (struct virtual_item * vi, int from, int to);
64020+ int (* const unit_num) (struct virtual_item * vi);
64021+ void (* const print_vi) (struct virtual_item * vi);
64022 };
64023
64024-extern struct item_operations *item_ops[TYPE_ANY + 1];
64025+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
64026
64027 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
64028 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
64029diff -urNp linux-2.6.32.48/include/linux/reiserfs_fs_sb.h linux-2.6.32.48/include/linux/reiserfs_fs_sb.h
64030--- linux-2.6.32.48/include/linux/reiserfs_fs_sb.h 2011-11-08 19:02:43.000000000 -0500
64031+++ linux-2.6.32.48/include/linux/reiserfs_fs_sb.h 2011-11-15 19:59:43.000000000 -0500
64032@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
64033 /* Comment? -Hans */
64034 wait_queue_head_t s_wait;
64035 /* To be obsoleted soon by per buffer seals.. -Hans */
64036- atomic_t s_generation_counter; // increased by one every time the
64037+ atomic_unchecked_t s_generation_counter; // increased by one every time the
64038 // tree gets re-balanced
64039 unsigned long s_properties; /* File system properties. Currently holds
64040 on-disk FS format */
64041diff -urNp linux-2.6.32.48/include/linux/relay.h linux-2.6.32.48/include/linux/relay.h
64042--- linux-2.6.32.48/include/linux/relay.h 2011-11-08 19:02:43.000000000 -0500
64043+++ linux-2.6.32.48/include/linux/relay.h 2011-11-15 19:59:43.000000000 -0500
64044@@ -159,7 +159,7 @@ struct rchan_callbacks
64045 * The callback should return 0 if successful, negative if not.
64046 */
64047 int (*remove_buf_file)(struct dentry *dentry);
64048-};
64049+} __no_const;
64050
64051 /*
64052 * CONFIG_RELAY kernel API, kernel/relay.c
64053diff -urNp linux-2.6.32.48/include/linux/rfkill.h linux-2.6.32.48/include/linux/rfkill.h
64054--- linux-2.6.32.48/include/linux/rfkill.h 2011-11-08 19:02:43.000000000 -0500
64055+++ linux-2.6.32.48/include/linux/rfkill.h 2011-11-15 19:59:43.000000000 -0500
64056@@ -144,6 +144,7 @@ struct rfkill_ops {
64057 void (*query)(struct rfkill *rfkill, void *data);
64058 int (*set_block)(void *data, bool blocked);
64059 };
64060+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
64061
64062 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
64063 /**
64064diff -urNp linux-2.6.32.48/include/linux/sched.h linux-2.6.32.48/include/linux/sched.h
64065--- linux-2.6.32.48/include/linux/sched.h 2011-11-08 19:02:43.000000000 -0500
64066+++ linux-2.6.32.48/include/linux/sched.h 2011-11-15 19:59:43.000000000 -0500
64067@@ -101,6 +101,7 @@ struct bio;
64068 struct fs_struct;
64069 struct bts_context;
64070 struct perf_event_context;
64071+struct linux_binprm;
64072
64073 /*
64074 * List of flags we want to share for kernel threads,
64075@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
64076 extern signed long schedule_timeout_uninterruptible(signed long timeout);
64077 asmlinkage void __schedule(void);
64078 asmlinkage void schedule(void);
64079-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
64080+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
64081
64082 struct nsproxy;
64083 struct user_namespace;
64084@@ -371,9 +372,12 @@ struct user_namespace;
64085 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
64086
64087 extern int sysctl_max_map_count;
64088+extern unsigned long sysctl_heap_stack_gap;
64089
64090 #include <linux/aio.h>
64091
64092+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
64093+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
64094 extern unsigned long
64095 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
64096 unsigned long, unsigned long);
64097@@ -666,6 +670,16 @@ struct signal_struct {
64098 struct tty_audit_buf *tty_audit_buf;
64099 #endif
64100
64101+#ifdef CONFIG_GRKERNSEC
64102+ u32 curr_ip;
64103+ u32 saved_ip;
64104+ u32 gr_saddr;
64105+ u32 gr_daddr;
64106+ u16 gr_sport;
64107+ u16 gr_dport;
64108+ u8 used_accept:1;
64109+#endif
64110+
64111 int oom_adj; /* OOM kill score adjustment (bit shift) */
64112 };
64113
64114@@ -723,6 +737,11 @@ struct user_struct {
64115 struct key *session_keyring; /* UID's default session keyring */
64116 #endif
64117
64118+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64119+ unsigned int banned;
64120+ unsigned long ban_expires;
64121+#endif
64122+
64123 /* Hash table maintenance information */
64124 struct hlist_node uidhash_node;
64125 uid_t uid;
64126@@ -1328,8 +1347,8 @@ struct task_struct {
64127 struct list_head thread_group;
64128
64129 struct completion *vfork_done; /* for vfork() */
64130- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
64131- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64132+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
64133+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
64134
64135 cputime_t utime, stime, utimescaled, stimescaled;
64136 cputime_t gtime;
64137@@ -1343,16 +1362,6 @@ struct task_struct {
64138 struct task_cputime cputime_expires;
64139 struct list_head cpu_timers[3];
64140
64141-/* process credentials */
64142- const struct cred *real_cred; /* objective and real subjective task
64143- * credentials (COW) */
64144- const struct cred *cred; /* effective (overridable) subjective task
64145- * credentials (COW) */
64146- struct mutex cred_guard_mutex; /* guard against foreign influences on
64147- * credential calculations
64148- * (notably. ptrace) */
64149- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64150-
64151 char comm[TASK_COMM_LEN]; /* executable name excluding path
64152 - access with [gs]et_task_comm (which lock
64153 it with task_lock())
64154@@ -1369,6 +1378,10 @@ struct task_struct {
64155 #endif
64156 /* CPU-specific state of this task */
64157 struct thread_struct thread;
64158+/* thread_info moved to task_struct */
64159+#ifdef CONFIG_X86
64160+ struct thread_info tinfo;
64161+#endif
64162 /* filesystem information */
64163 struct fs_struct *fs;
64164 /* open file information */
64165@@ -1436,6 +1449,15 @@ struct task_struct {
64166 int hardirq_context;
64167 int softirq_context;
64168 #endif
64169+
64170+/* process credentials */
64171+ const struct cred *real_cred; /* objective and real subjective task
64172+ * credentials (COW) */
64173+ struct mutex cred_guard_mutex; /* guard against foreign influences on
64174+ * credential calculations
64175+ * (notably. ptrace) */
64176+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
64177+
64178 #ifdef CONFIG_LOCKDEP
64179 # define MAX_LOCK_DEPTH 48UL
64180 u64 curr_chain_key;
64181@@ -1456,6 +1478,9 @@ struct task_struct {
64182
64183 struct backing_dev_info *backing_dev_info;
64184
64185+ const struct cred *cred; /* effective (overridable) subjective task
64186+ * credentials (COW) */
64187+
64188 struct io_context *io_context;
64189
64190 unsigned long ptrace_message;
64191@@ -1519,6 +1544,21 @@ struct task_struct {
64192 unsigned long default_timer_slack_ns;
64193
64194 struct list_head *scm_work_list;
64195+
64196+#ifdef CONFIG_GRKERNSEC
64197+ /* grsecurity */
64198+ struct dentry *gr_chroot_dentry;
64199+ struct acl_subject_label *acl;
64200+ struct acl_role_label *role;
64201+ struct file *exec_file;
64202+ u16 acl_role_id;
64203+ /* is this the task that authenticated to the special role */
64204+ u8 acl_sp_role;
64205+ u8 is_writable;
64206+ u8 brute;
64207+ u8 gr_is_chrooted;
64208+#endif
64209+
64210 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
64211 /* Index of current stored adress in ret_stack */
64212 int curr_ret_stack;
64213@@ -1542,6 +1582,57 @@ struct task_struct {
64214 #endif /* CONFIG_TRACING */
64215 };
64216
64217+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
64218+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
64219+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
64220+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
64221+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
64222+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
64223+
64224+#ifdef CONFIG_PAX_SOFTMODE
64225+extern int pax_softmode;
64226+#endif
64227+
64228+extern int pax_check_flags(unsigned long *);
64229+
64230+/* if tsk != current then task_lock must be held on it */
64231+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64232+static inline unsigned long pax_get_flags(struct task_struct *tsk)
64233+{
64234+ if (likely(tsk->mm))
64235+ return tsk->mm->pax_flags;
64236+ else
64237+ return 0UL;
64238+}
64239+
64240+/* if tsk != current then task_lock must be held on it */
64241+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
64242+{
64243+ if (likely(tsk->mm)) {
64244+ tsk->mm->pax_flags = flags;
64245+ return 0;
64246+ }
64247+ return -EINVAL;
64248+}
64249+#endif
64250+
64251+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64252+extern void pax_set_initial_flags(struct linux_binprm *bprm);
64253+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64254+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
64255+#endif
64256+
64257+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
64258+extern void pax_report_insns(void *pc, void *sp);
64259+extern void pax_report_refcount_overflow(struct pt_regs *regs);
64260+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
64261+
64262+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
64263+extern void pax_track_stack(void);
64264+#else
64265+static inline void pax_track_stack(void) {}
64266+#endif
64267+
64268 /* Future-safe accessor for struct task_struct's cpus_allowed. */
64269 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
64270
64271@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
64272 #define PF_DUMPCORE 0x00000200 /* dumped core */
64273 #define PF_SIGNALED 0x00000400 /* killed by a signal */
64274 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
64275-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
64276+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
64277 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
64278 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
64279 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
64280@@ -1978,7 +2069,9 @@ void yield(void);
64281 extern struct exec_domain default_exec_domain;
64282
64283 union thread_union {
64284+#ifndef CONFIG_X86
64285 struct thread_info thread_info;
64286+#endif
64287 unsigned long stack[THREAD_SIZE/sizeof(long)];
64288 };
64289
64290@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
64291 */
64292
64293 extern struct task_struct *find_task_by_vpid(pid_t nr);
64294+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
64295 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
64296 struct pid_namespace *ns);
64297
64298@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
64299 extern void exit_itimers(struct signal_struct *);
64300 extern void flush_itimer_signals(void);
64301
64302-extern NORET_TYPE void do_group_exit(int);
64303+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
64304
64305 extern void daemonize(const char *, ...);
64306 extern int allow_signal(int);
64307@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
64308
64309 #endif
64310
64311-static inline int object_is_on_stack(void *obj)
64312+static inline int object_starts_on_stack(void *obj)
64313 {
64314- void *stack = task_stack_page(current);
64315+ const void *stack = task_stack_page(current);
64316
64317 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64318 }
64319
64320+#ifdef CONFIG_PAX_USERCOPY
64321+extern int object_is_on_stack(const void *obj, unsigned long len);
64322+#endif
64323+
64324 extern void thread_info_cache_init(void);
64325
64326 #ifdef CONFIG_DEBUG_STACK_USAGE
64327diff -urNp linux-2.6.32.48/include/linux/screen_info.h linux-2.6.32.48/include/linux/screen_info.h
64328--- linux-2.6.32.48/include/linux/screen_info.h 2011-11-08 19:02:43.000000000 -0500
64329+++ linux-2.6.32.48/include/linux/screen_info.h 2011-11-15 19:59:43.000000000 -0500
64330@@ -42,7 +42,8 @@ struct screen_info {
64331 __u16 pages; /* 0x32 */
64332 __u16 vesa_attributes; /* 0x34 */
64333 __u32 capabilities; /* 0x36 */
64334- __u8 _reserved[6]; /* 0x3a */
64335+ __u16 vesapm_size; /* 0x3a */
64336+ __u8 _reserved[4]; /* 0x3c */
64337 } __attribute__((packed));
64338
64339 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
64340diff -urNp linux-2.6.32.48/include/linux/security.h linux-2.6.32.48/include/linux/security.h
64341--- linux-2.6.32.48/include/linux/security.h 2011-11-08 19:02:43.000000000 -0500
64342+++ linux-2.6.32.48/include/linux/security.h 2011-11-15 19:59:43.000000000 -0500
64343@@ -34,6 +34,7 @@
64344 #include <linux/key.h>
64345 #include <linux/xfrm.h>
64346 #include <linux/gfp.h>
64347+#include <linux/grsecurity.h>
64348 #include <net/flow.h>
64349
64350 /* Maximum number of letters for an LSM name string */
64351diff -urNp linux-2.6.32.48/include/linux/seq_file.h linux-2.6.32.48/include/linux/seq_file.h
64352--- linux-2.6.32.48/include/linux/seq_file.h 2011-11-08 19:02:43.000000000 -0500
64353+++ linux-2.6.32.48/include/linux/seq_file.h 2011-11-15 19:59:43.000000000 -0500
64354@@ -32,6 +32,7 @@ struct seq_operations {
64355 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64356 int (*show) (struct seq_file *m, void *v);
64357 };
64358+typedef struct seq_operations __no_const seq_operations_no_const;
64359
64360 #define SEQ_SKIP 1
64361
64362diff -urNp linux-2.6.32.48/include/linux/shm.h linux-2.6.32.48/include/linux/shm.h
64363--- linux-2.6.32.48/include/linux/shm.h 2011-11-08 19:02:43.000000000 -0500
64364+++ linux-2.6.32.48/include/linux/shm.h 2011-11-15 19:59:43.000000000 -0500
64365@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
64366 pid_t shm_cprid;
64367 pid_t shm_lprid;
64368 struct user_struct *mlock_user;
64369+#ifdef CONFIG_GRKERNSEC
64370+ time_t shm_createtime;
64371+ pid_t shm_lapid;
64372+#endif
64373 };
64374
64375 /* shm_mode upper byte flags */
64376diff -urNp linux-2.6.32.48/include/linux/skbuff.h linux-2.6.32.48/include/linux/skbuff.h
64377--- linux-2.6.32.48/include/linux/skbuff.h 2011-11-08 19:02:43.000000000 -0500
64378+++ linux-2.6.32.48/include/linux/skbuff.h 2011-11-15 19:59:43.000000000 -0500
64379@@ -14,6 +14,7 @@
64380 #ifndef _LINUX_SKBUFF_H
64381 #define _LINUX_SKBUFF_H
64382
64383+#include <linux/const.h>
64384 #include <linux/kernel.h>
64385 #include <linux/kmemcheck.h>
64386 #include <linux/compiler.h>
64387@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
64388 */
64389 static inline int skb_queue_empty(const struct sk_buff_head *list)
64390 {
64391- return list->next == (struct sk_buff *)list;
64392+ return list->next == (const struct sk_buff *)list;
64393 }
64394
64395 /**
64396@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
64397 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64398 const struct sk_buff *skb)
64399 {
64400- return (skb->next == (struct sk_buff *) list);
64401+ return (skb->next == (const struct sk_buff *) list);
64402 }
64403
64404 /**
64405@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
64406 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64407 const struct sk_buff *skb)
64408 {
64409- return (skb->prev == (struct sk_buff *) list);
64410+ return (skb->prev == (const struct sk_buff *) list);
64411 }
64412
64413 /**
64414@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
64415 * headroom, you should not reduce this.
64416 */
64417 #ifndef NET_SKB_PAD
64418-#define NET_SKB_PAD 32
64419+#define NET_SKB_PAD (_AC(32,UL))
64420 #endif
64421
64422 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
64423diff -urNp linux-2.6.32.48/include/linux/slab_def.h linux-2.6.32.48/include/linux/slab_def.h
64424--- linux-2.6.32.48/include/linux/slab_def.h 2011-11-08 19:02:43.000000000 -0500
64425+++ linux-2.6.32.48/include/linux/slab_def.h 2011-11-15 19:59:43.000000000 -0500
64426@@ -69,10 +69,10 @@ struct kmem_cache {
64427 unsigned long node_allocs;
64428 unsigned long node_frees;
64429 unsigned long node_overflow;
64430- atomic_t allochit;
64431- atomic_t allocmiss;
64432- atomic_t freehit;
64433- atomic_t freemiss;
64434+ atomic_unchecked_t allochit;
64435+ atomic_unchecked_t allocmiss;
64436+ atomic_unchecked_t freehit;
64437+ atomic_unchecked_t freemiss;
64438
64439 /*
64440 * If debugging is enabled, then the allocator can add additional
64441diff -urNp linux-2.6.32.48/include/linux/slab.h linux-2.6.32.48/include/linux/slab.h
64442--- linux-2.6.32.48/include/linux/slab.h 2011-11-08 19:02:43.000000000 -0500
64443+++ linux-2.6.32.48/include/linux/slab.h 2011-11-15 19:59:43.000000000 -0500
64444@@ -11,12 +11,20 @@
64445
64446 #include <linux/gfp.h>
64447 #include <linux/types.h>
64448+#include <linux/err.h>
64449
64450 /*
64451 * Flags to pass to kmem_cache_create().
64452 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64453 */
64454 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64455+
64456+#ifdef CONFIG_PAX_USERCOPY
64457+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64458+#else
64459+#define SLAB_USERCOPY 0x00000000UL
64460+#endif
64461+
64462 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64463 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64464 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64465@@ -82,10 +90,13 @@
64466 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64467 * Both make kfree a no-op.
64468 */
64469-#define ZERO_SIZE_PTR ((void *)16)
64470+#define ZERO_SIZE_PTR \
64471+({ \
64472+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64473+ (void *)(-MAX_ERRNO-1L); \
64474+})
64475
64476-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64477- (unsigned long)ZERO_SIZE_PTR)
64478+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64479
64480 /*
64481 * struct kmem_cache related prototypes
64482@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
64483 void kfree(const void *);
64484 void kzfree(const void *);
64485 size_t ksize(const void *);
64486+void check_object_size(const void *ptr, unsigned long n, bool to);
64487
64488 /*
64489 * Allocator specific definitions. These are mainly used to establish optimized
64490@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
64491
64492 void __init kmem_cache_init_late(void);
64493
64494+#define kmalloc(x, y) \
64495+({ \
64496+ void *___retval; \
64497+ intoverflow_t ___x = (intoverflow_t)x; \
64498+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
64499+ ___retval = NULL; \
64500+ else \
64501+ ___retval = kmalloc((size_t)___x, (y)); \
64502+ ___retval; \
64503+})
64504+
64505+#define kmalloc_node(x, y, z) \
64506+({ \
64507+ void *___retval; \
64508+ intoverflow_t ___x = (intoverflow_t)x; \
64509+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
64510+ ___retval = NULL; \
64511+ else \
64512+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
64513+ ___retval; \
64514+})
64515+
64516+#define kzalloc(x, y) \
64517+({ \
64518+ void *___retval; \
64519+ intoverflow_t ___x = (intoverflow_t)x; \
64520+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
64521+ ___retval = NULL; \
64522+ else \
64523+ ___retval = kzalloc((size_t)___x, (y)); \
64524+ ___retval; \
64525+})
64526+
64527 #endif /* _LINUX_SLAB_H */
64528diff -urNp linux-2.6.32.48/include/linux/slub_def.h linux-2.6.32.48/include/linux/slub_def.h
64529--- linux-2.6.32.48/include/linux/slub_def.h 2011-11-08 19:02:43.000000000 -0500
64530+++ linux-2.6.32.48/include/linux/slub_def.h 2011-11-15 19:59:43.000000000 -0500
64531@@ -86,7 +86,7 @@ struct kmem_cache {
64532 struct kmem_cache_order_objects max;
64533 struct kmem_cache_order_objects min;
64534 gfp_t allocflags; /* gfp flags to use on each alloc */
64535- int refcount; /* Refcount for slab cache destroy */
64536+ atomic_t refcount; /* Refcount for slab cache destroy */
64537 void (*ctor)(void *);
64538 int inuse; /* Offset to metadata */
64539 int align; /* Alignment */
64540@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
64541 #endif
64542
64543 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64544-void *__kmalloc(size_t size, gfp_t flags);
64545+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
64546
64547 #ifdef CONFIG_KMEMTRACE
64548 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
64549diff -urNp linux-2.6.32.48/include/linux/sonet.h linux-2.6.32.48/include/linux/sonet.h
64550--- linux-2.6.32.48/include/linux/sonet.h 2011-11-08 19:02:43.000000000 -0500
64551+++ linux-2.6.32.48/include/linux/sonet.h 2011-11-15 19:59:43.000000000 -0500
64552@@ -61,7 +61,7 @@ struct sonet_stats {
64553 #include <asm/atomic.h>
64554
64555 struct k_sonet_stats {
64556-#define __HANDLE_ITEM(i) atomic_t i
64557+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64558 __SONET_ITEMS
64559 #undef __HANDLE_ITEM
64560 };
64561diff -urNp linux-2.6.32.48/include/linux/sunrpc/cache.h linux-2.6.32.48/include/linux/sunrpc/cache.h
64562--- linux-2.6.32.48/include/linux/sunrpc/cache.h 2011-11-08 19:02:43.000000000 -0500
64563+++ linux-2.6.32.48/include/linux/sunrpc/cache.h 2011-11-15 19:59:43.000000000 -0500
64564@@ -125,7 +125,7 @@ struct cache_detail {
64565 */
64566 struct cache_req {
64567 struct cache_deferred_req *(*defer)(struct cache_req *req);
64568-};
64569+} __no_const;
64570 /* this must be embedded in a deferred_request that is being
64571 * delayed awaiting cache-fill
64572 */
64573diff -urNp linux-2.6.32.48/include/linux/sunrpc/clnt.h linux-2.6.32.48/include/linux/sunrpc/clnt.h
64574--- linux-2.6.32.48/include/linux/sunrpc/clnt.h 2011-11-08 19:02:43.000000000 -0500
64575+++ linux-2.6.32.48/include/linux/sunrpc/clnt.h 2011-11-15 19:59:43.000000000 -0500
64576@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
64577 {
64578 switch (sap->sa_family) {
64579 case AF_INET:
64580- return ntohs(((struct sockaddr_in *)sap)->sin_port);
64581+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64582 case AF_INET6:
64583- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64584+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64585 }
64586 return 0;
64587 }
64588@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
64589 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64590 const struct sockaddr *src)
64591 {
64592- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64593+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64594 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64595
64596 dsin->sin_family = ssin->sin_family;
64597@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
64598 if (sa->sa_family != AF_INET6)
64599 return 0;
64600
64601- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64602+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64603 }
64604
64605 #endif /* __KERNEL__ */
64606diff -urNp linux-2.6.32.48/include/linux/sunrpc/svc_rdma.h linux-2.6.32.48/include/linux/sunrpc/svc_rdma.h
64607--- linux-2.6.32.48/include/linux/sunrpc/svc_rdma.h 2011-11-08 19:02:43.000000000 -0500
64608+++ linux-2.6.32.48/include/linux/sunrpc/svc_rdma.h 2011-11-15 19:59:43.000000000 -0500
64609@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64610 extern unsigned int svcrdma_max_requests;
64611 extern unsigned int svcrdma_max_req_size;
64612
64613-extern atomic_t rdma_stat_recv;
64614-extern atomic_t rdma_stat_read;
64615-extern atomic_t rdma_stat_write;
64616-extern atomic_t rdma_stat_sq_starve;
64617-extern atomic_t rdma_stat_rq_starve;
64618-extern atomic_t rdma_stat_rq_poll;
64619-extern atomic_t rdma_stat_rq_prod;
64620-extern atomic_t rdma_stat_sq_poll;
64621-extern atomic_t rdma_stat_sq_prod;
64622+extern atomic_unchecked_t rdma_stat_recv;
64623+extern atomic_unchecked_t rdma_stat_read;
64624+extern atomic_unchecked_t rdma_stat_write;
64625+extern atomic_unchecked_t rdma_stat_sq_starve;
64626+extern atomic_unchecked_t rdma_stat_rq_starve;
64627+extern atomic_unchecked_t rdma_stat_rq_poll;
64628+extern atomic_unchecked_t rdma_stat_rq_prod;
64629+extern atomic_unchecked_t rdma_stat_sq_poll;
64630+extern atomic_unchecked_t rdma_stat_sq_prod;
64631
64632 #define RPCRDMA_VERSION 1
64633
64634diff -urNp linux-2.6.32.48/include/linux/suspend.h linux-2.6.32.48/include/linux/suspend.h
64635--- linux-2.6.32.48/include/linux/suspend.h 2011-11-08 19:02:43.000000000 -0500
64636+++ linux-2.6.32.48/include/linux/suspend.h 2011-11-15 19:59:43.000000000 -0500
64637@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
64638 * which require special recovery actions in that situation.
64639 */
64640 struct platform_suspend_ops {
64641- int (*valid)(suspend_state_t state);
64642- int (*begin)(suspend_state_t state);
64643- int (*prepare)(void);
64644- int (*prepare_late)(void);
64645- int (*enter)(suspend_state_t state);
64646- void (*wake)(void);
64647- void (*finish)(void);
64648- void (*end)(void);
64649- void (*recover)(void);
64650+ int (* const valid)(suspend_state_t state);
64651+ int (* const begin)(suspend_state_t state);
64652+ int (* const prepare)(void);
64653+ int (* const prepare_late)(void);
64654+ int (* const enter)(suspend_state_t state);
64655+ void (* const wake)(void);
64656+ void (* const finish)(void);
64657+ void (* const end)(void);
64658+ void (* const recover)(void);
64659 };
64660
64661 #ifdef CONFIG_SUSPEND
64662@@ -120,7 +120,7 @@ struct platform_suspend_ops {
64663 * suspend_set_ops - set platform dependent suspend operations
64664 * @ops: The new suspend operations to set.
64665 */
64666-extern void suspend_set_ops(struct platform_suspend_ops *ops);
64667+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
64668 extern int suspend_valid_only_mem(suspend_state_t state);
64669
64670 /**
64671@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
64672 #else /* !CONFIG_SUSPEND */
64673 #define suspend_valid_only_mem NULL
64674
64675-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
64676+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
64677 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
64678 #endif /* !CONFIG_SUSPEND */
64679
64680@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
64681 * platforms which require special recovery actions in that situation.
64682 */
64683 struct platform_hibernation_ops {
64684- int (*begin)(void);
64685- void (*end)(void);
64686- int (*pre_snapshot)(void);
64687- void (*finish)(void);
64688- int (*prepare)(void);
64689- int (*enter)(void);
64690- void (*leave)(void);
64691- int (*pre_restore)(void);
64692- void (*restore_cleanup)(void);
64693- void (*recover)(void);
64694+ int (* const begin)(void);
64695+ void (* const end)(void);
64696+ int (* const pre_snapshot)(void);
64697+ void (* const finish)(void);
64698+ int (* const prepare)(void);
64699+ int (* const enter)(void);
64700+ void (* const leave)(void);
64701+ int (* const pre_restore)(void);
64702+ void (* const restore_cleanup)(void);
64703+ void (* const recover)(void);
64704 };
64705
64706 #ifdef CONFIG_HIBERNATION
64707@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
64708 extern void swsusp_unset_page_free(struct page *);
64709 extern unsigned long get_safe_page(gfp_t gfp_mask);
64710
64711-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
64712+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
64713 extern int hibernate(void);
64714 extern bool system_entering_hibernation(void);
64715 #else /* CONFIG_HIBERNATION */
64716@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
64717 static inline void swsusp_set_page_free(struct page *p) {}
64718 static inline void swsusp_unset_page_free(struct page *p) {}
64719
64720-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
64721+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
64722 static inline int hibernate(void) { return -ENOSYS; }
64723 static inline bool system_entering_hibernation(void) { return false; }
64724 #endif /* CONFIG_HIBERNATION */
64725diff -urNp linux-2.6.32.48/include/linux/sysctl.h linux-2.6.32.48/include/linux/sysctl.h
64726--- linux-2.6.32.48/include/linux/sysctl.h 2011-11-08 19:02:43.000000000 -0500
64727+++ linux-2.6.32.48/include/linux/sysctl.h 2011-11-15 19:59:43.000000000 -0500
64728@@ -164,7 +164,11 @@ enum
64729 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64730 };
64731
64732-
64733+#ifdef CONFIG_PAX_SOFTMODE
64734+enum {
64735+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64736+};
64737+#endif
64738
64739 /* CTL_VM names: */
64740 enum
64741@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
64742
64743 extern int proc_dostring(struct ctl_table *, int,
64744 void __user *, size_t *, loff_t *);
64745+extern int proc_dostring_modpriv(struct ctl_table *, int,
64746+ void __user *, size_t *, loff_t *);
64747 extern int proc_dointvec(struct ctl_table *, int,
64748 void __user *, size_t *, loff_t *);
64749 extern int proc_dointvec_minmax(struct ctl_table *, int,
64750@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
64751
64752 extern ctl_handler sysctl_data;
64753 extern ctl_handler sysctl_string;
64754+extern ctl_handler sysctl_string_modpriv;
64755 extern ctl_handler sysctl_intvec;
64756 extern ctl_handler sysctl_jiffies;
64757 extern ctl_handler sysctl_ms_jiffies;
64758diff -urNp linux-2.6.32.48/include/linux/sysfs.h linux-2.6.32.48/include/linux/sysfs.h
64759--- linux-2.6.32.48/include/linux/sysfs.h 2011-11-08 19:02:43.000000000 -0500
64760+++ linux-2.6.32.48/include/linux/sysfs.h 2011-11-15 19:59:43.000000000 -0500
64761@@ -75,8 +75,8 @@ struct bin_attribute {
64762 };
64763
64764 struct sysfs_ops {
64765- ssize_t (*show)(struct kobject *, struct attribute *,char *);
64766- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
64767+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
64768+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
64769 };
64770
64771 struct sysfs_dirent;
64772diff -urNp linux-2.6.32.48/include/linux/thread_info.h linux-2.6.32.48/include/linux/thread_info.h
64773--- linux-2.6.32.48/include/linux/thread_info.h 2011-11-08 19:02:43.000000000 -0500
64774+++ linux-2.6.32.48/include/linux/thread_info.h 2011-11-15 19:59:43.000000000 -0500
64775@@ -23,7 +23,7 @@ struct restart_block {
64776 };
64777 /* For futex_wait and futex_wait_requeue_pi */
64778 struct {
64779- u32 *uaddr;
64780+ u32 __user *uaddr;
64781 u32 val;
64782 u32 flags;
64783 u32 bitset;
64784diff -urNp linux-2.6.32.48/include/linux/tty.h linux-2.6.32.48/include/linux/tty.h
64785--- linux-2.6.32.48/include/linux/tty.h 2011-11-08 19:02:43.000000000 -0500
64786+++ linux-2.6.32.48/include/linux/tty.h 2011-11-15 19:59:43.000000000 -0500
64787@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
64788 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
64789 extern void tty_ldisc_enable(struct tty_struct *tty);
64790
64791-
64792 /* n_tty.c */
64793 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
64794
64795diff -urNp linux-2.6.32.48/include/linux/tty_ldisc.h linux-2.6.32.48/include/linux/tty_ldisc.h
64796--- linux-2.6.32.48/include/linux/tty_ldisc.h 2011-11-08 19:02:43.000000000 -0500
64797+++ linux-2.6.32.48/include/linux/tty_ldisc.h 2011-11-15 19:59:43.000000000 -0500
64798@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
64799
64800 struct module *owner;
64801
64802- int refcount;
64803+ atomic_t refcount;
64804 };
64805
64806 struct tty_ldisc {
64807diff -urNp linux-2.6.32.48/include/linux/types.h linux-2.6.32.48/include/linux/types.h
64808--- linux-2.6.32.48/include/linux/types.h 2011-11-08 19:02:43.000000000 -0500
64809+++ linux-2.6.32.48/include/linux/types.h 2011-11-15 19:59:43.000000000 -0500
64810@@ -191,10 +191,26 @@ typedef struct {
64811 volatile int counter;
64812 } atomic_t;
64813
64814+#ifdef CONFIG_PAX_REFCOUNT
64815+typedef struct {
64816+ volatile int counter;
64817+} atomic_unchecked_t;
64818+#else
64819+typedef atomic_t atomic_unchecked_t;
64820+#endif
64821+
64822 #ifdef CONFIG_64BIT
64823 typedef struct {
64824 volatile long counter;
64825 } atomic64_t;
64826+
64827+#ifdef CONFIG_PAX_REFCOUNT
64828+typedef struct {
64829+ volatile long counter;
64830+} atomic64_unchecked_t;
64831+#else
64832+typedef atomic64_t atomic64_unchecked_t;
64833+#endif
64834 #endif
64835
64836 struct ustat {
64837diff -urNp linux-2.6.32.48/include/linux/uaccess.h linux-2.6.32.48/include/linux/uaccess.h
64838--- linux-2.6.32.48/include/linux/uaccess.h 2011-11-08 19:02:43.000000000 -0500
64839+++ linux-2.6.32.48/include/linux/uaccess.h 2011-11-15 19:59:43.000000000 -0500
64840@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
64841 long ret; \
64842 mm_segment_t old_fs = get_fs(); \
64843 \
64844- set_fs(KERNEL_DS); \
64845 pagefault_disable(); \
64846- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64847- pagefault_enable(); \
64848+ set_fs(KERNEL_DS); \
64849+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64850 set_fs(old_fs); \
64851+ pagefault_enable(); \
64852 ret; \
64853 })
64854
64855@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
64856 * Safely read from address @src to the buffer at @dst. If a kernel fault
64857 * happens, handle that and return -EFAULT.
64858 */
64859-extern long probe_kernel_read(void *dst, void *src, size_t size);
64860+extern long probe_kernel_read(void *dst, const void *src, size_t size);
64861
64862 /*
64863 * probe_kernel_write(): safely attempt to write to a location
64864@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
64865 * Safely write to address @dst from the buffer at @src. If a kernel fault
64866 * happens, handle that and return -EFAULT.
64867 */
64868-extern long probe_kernel_write(void *dst, void *src, size_t size);
64869+extern long probe_kernel_write(void *dst, const void *src, size_t size);
64870
64871 #endif /* __LINUX_UACCESS_H__ */
64872diff -urNp linux-2.6.32.48/include/linux/unaligned/access_ok.h linux-2.6.32.48/include/linux/unaligned/access_ok.h
64873--- linux-2.6.32.48/include/linux/unaligned/access_ok.h 2011-11-08 19:02:43.000000000 -0500
64874+++ linux-2.6.32.48/include/linux/unaligned/access_ok.h 2011-11-15 19:59:43.000000000 -0500
64875@@ -6,32 +6,32 @@
64876
64877 static inline u16 get_unaligned_le16(const void *p)
64878 {
64879- return le16_to_cpup((__le16 *)p);
64880+ return le16_to_cpup((const __le16 *)p);
64881 }
64882
64883 static inline u32 get_unaligned_le32(const void *p)
64884 {
64885- return le32_to_cpup((__le32 *)p);
64886+ return le32_to_cpup((const __le32 *)p);
64887 }
64888
64889 static inline u64 get_unaligned_le64(const void *p)
64890 {
64891- return le64_to_cpup((__le64 *)p);
64892+ return le64_to_cpup((const __le64 *)p);
64893 }
64894
64895 static inline u16 get_unaligned_be16(const void *p)
64896 {
64897- return be16_to_cpup((__be16 *)p);
64898+ return be16_to_cpup((const __be16 *)p);
64899 }
64900
64901 static inline u32 get_unaligned_be32(const void *p)
64902 {
64903- return be32_to_cpup((__be32 *)p);
64904+ return be32_to_cpup((const __be32 *)p);
64905 }
64906
64907 static inline u64 get_unaligned_be64(const void *p)
64908 {
64909- return be64_to_cpup((__be64 *)p);
64910+ return be64_to_cpup((const __be64 *)p);
64911 }
64912
64913 static inline void put_unaligned_le16(u16 val, void *p)
64914diff -urNp linux-2.6.32.48/include/linux/vermagic.h linux-2.6.32.48/include/linux/vermagic.h
64915--- linux-2.6.32.48/include/linux/vermagic.h 2011-11-08 19:02:43.000000000 -0500
64916+++ linux-2.6.32.48/include/linux/vermagic.h 2011-11-15 19:59:43.000000000 -0500
64917@@ -26,9 +26,28 @@
64918 #define MODULE_ARCH_VERMAGIC ""
64919 #endif
64920
64921+#ifdef CONFIG_PAX_REFCOUNT
64922+#define MODULE_PAX_REFCOUNT "REFCOUNT "
64923+#else
64924+#define MODULE_PAX_REFCOUNT ""
64925+#endif
64926+
64927+#ifdef CONSTIFY_PLUGIN
64928+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64929+#else
64930+#define MODULE_CONSTIFY_PLUGIN ""
64931+#endif
64932+
64933+#ifdef CONFIG_GRKERNSEC
64934+#define MODULE_GRSEC "GRSEC "
64935+#else
64936+#define MODULE_GRSEC ""
64937+#endif
64938+
64939 #define VERMAGIC_STRING \
64940 UTS_RELEASE " " \
64941 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64942 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64943- MODULE_ARCH_VERMAGIC
64944+ MODULE_ARCH_VERMAGIC \
64945+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
64946
64947diff -urNp linux-2.6.32.48/include/linux/vmalloc.h linux-2.6.32.48/include/linux/vmalloc.h
64948--- linux-2.6.32.48/include/linux/vmalloc.h 2011-11-08 19:02:43.000000000 -0500
64949+++ linux-2.6.32.48/include/linux/vmalloc.h 2011-11-15 19:59:43.000000000 -0500
64950@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
64951 #define VM_MAP 0x00000004 /* vmap()ed pages */
64952 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64953 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64954+
64955+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64956+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
64957+#endif
64958+
64959 /* bits [20..32] reserved for arch specific ioremap internals */
64960
64961 /*
64962@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
64963
64964 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
64965
64966+#define vmalloc(x) \
64967+({ \
64968+ void *___retval; \
64969+ intoverflow_t ___x = (intoverflow_t)x; \
64970+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
64971+ ___retval = NULL; \
64972+ else \
64973+ ___retval = vmalloc((unsigned long)___x); \
64974+ ___retval; \
64975+})
64976+
64977+#define __vmalloc(x, y, z) \
64978+({ \
64979+ void *___retval; \
64980+ intoverflow_t ___x = (intoverflow_t)x; \
64981+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
64982+ ___retval = NULL; \
64983+ else \
64984+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
64985+ ___retval; \
64986+})
64987+
64988+#define vmalloc_user(x) \
64989+({ \
64990+ void *___retval; \
64991+ intoverflow_t ___x = (intoverflow_t)x; \
64992+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
64993+ ___retval = NULL; \
64994+ else \
64995+ ___retval = vmalloc_user((unsigned long)___x); \
64996+ ___retval; \
64997+})
64998+
64999+#define vmalloc_exec(x) \
65000+({ \
65001+ void *___retval; \
65002+ intoverflow_t ___x = (intoverflow_t)x; \
65003+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
65004+ ___retval = NULL; \
65005+ else \
65006+ ___retval = vmalloc_exec((unsigned long)___x); \
65007+ ___retval; \
65008+})
65009+
65010+#define vmalloc_node(x, y) \
65011+({ \
65012+ void *___retval; \
65013+ intoverflow_t ___x = (intoverflow_t)x; \
65014+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
65015+ ___retval = NULL; \
65016+ else \
65017+ ___retval = vmalloc_node((unsigned long)___x, (y));\
65018+ ___retval; \
65019+})
65020+
65021+#define vmalloc_32(x) \
65022+({ \
65023+ void *___retval; \
65024+ intoverflow_t ___x = (intoverflow_t)x; \
65025+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
65026+ ___retval = NULL; \
65027+ else \
65028+ ___retval = vmalloc_32((unsigned long)___x); \
65029+ ___retval; \
65030+})
65031+
65032+#define vmalloc_32_user(x) \
65033+({ \
65034+ void *___retval; \
65035+ intoverflow_t ___x = (intoverflow_t)x; \
65036+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
65037+ ___retval = NULL; \
65038+ else \
65039+ ___retval = vmalloc_32_user((unsigned long)___x);\
65040+ ___retval; \
65041+})
65042+
65043 #endif /* _LINUX_VMALLOC_H */
65044diff -urNp linux-2.6.32.48/include/linux/vmstat.h linux-2.6.32.48/include/linux/vmstat.h
65045--- linux-2.6.32.48/include/linux/vmstat.h 2011-11-08 19:02:43.000000000 -0500
65046+++ linux-2.6.32.48/include/linux/vmstat.h 2011-11-15 19:59:43.000000000 -0500
65047@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
65048 /*
65049 * Zone based page accounting with per cpu differentials.
65050 */
65051-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65052+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65053
65054 static inline void zone_page_state_add(long x, struct zone *zone,
65055 enum zone_stat_item item)
65056 {
65057- atomic_long_add(x, &zone->vm_stat[item]);
65058- atomic_long_add(x, &vm_stat[item]);
65059+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
65060+ atomic_long_add_unchecked(x, &vm_stat[item]);
65061 }
65062
65063 static inline unsigned long global_page_state(enum zone_stat_item item)
65064 {
65065- long x = atomic_long_read(&vm_stat[item]);
65066+ long x = atomic_long_read_unchecked(&vm_stat[item]);
65067 #ifdef CONFIG_SMP
65068 if (x < 0)
65069 x = 0;
65070@@ -158,7 +158,7 @@ static inline unsigned long global_page_
65071 static inline unsigned long zone_page_state(struct zone *zone,
65072 enum zone_stat_item item)
65073 {
65074- long x = atomic_long_read(&zone->vm_stat[item]);
65075+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
65076 #ifdef CONFIG_SMP
65077 if (x < 0)
65078 x = 0;
65079@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
65080 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
65081 enum zone_stat_item item)
65082 {
65083- long x = atomic_long_read(&zone->vm_stat[item]);
65084+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
65085
65086 #ifdef CONFIG_SMP
65087 int cpu;
65088@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
65089
65090 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
65091 {
65092- atomic_long_inc(&zone->vm_stat[item]);
65093- atomic_long_inc(&vm_stat[item]);
65094+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
65095+ atomic_long_inc_unchecked(&vm_stat[item]);
65096 }
65097
65098 static inline void __inc_zone_page_state(struct page *page,
65099@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
65100
65101 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
65102 {
65103- atomic_long_dec(&zone->vm_stat[item]);
65104- atomic_long_dec(&vm_stat[item]);
65105+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
65106+ atomic_long_dec_unchecked(&vm_stat[item]);
65107 }
65108
65109 static inline void __dec_zone_page_state(struct page *page,
65110diff -urNp linux-2.6.32.48/include/media/saa7146_vv.h linux-2.6.32.48/include/media/saa7146_vv.h
65111--- linux-2.6.32.48/include/media/saa7146_vv.h 2011-11-08 19:02:43.000000000 -0500
65112+++ linux-2.6.32.48/include/media/saa7146_vv.h 2011-11-15 19:59:43.000000000 -0500
65113@@ -167,7 +167,7 @@ struct saa7146_ext_vv
65114 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
65115
65116 /* the extension can override this */
65117- struct v4l2_ioctl_ops ops;
65118+ v4l2_ioctl_ops_no_const ops;
65119 /* pointer to the saa7146 core ops */
65120 const struct v4l2_ioctl_ops *core_ops;
65121
65122diff -urNp linux-2.6.32.48/include/media/v4l2-dev.h linux-2.6.32.48/include/media/v4l2-dev.h
65123--- linux-2.6.32.48/include/media/v4l2-dev.h 2011-11-08 19:02:43.000000000 -0500
65124+++ linux-2.6.32.48/include/media/v4l2-dev.h 2011-11-15 19:59:43.000000000 -0500
65125@@ -34,7 +34,7 @@ struct v4l2_device;
65126 #define V4L2_FL_UNREGISTERED (0)
65127
65128 struct v4l2_file_operations {
65129- struct module *owner;
65130+ struct module * const owner;
65131 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
65132 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
65133 unsigned int (*poll) (struct file *, struct poll_table_struct *);
65134@@ -46,6 +46,7 @@ struct v4l2_file_operations {
65135 int (*open) (struct file *);
65136 int (*release) (struct file *);
65137 };
65138+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
65139
65140 /*
65141 * Newer version of video_device, handled by videodev2.c
65142diff -urNp linux-2.6.32.48/include/media/v4l2-device.h linux-2.6.32.48/include/media/v4l2-device.h
65143--- linux-2.6.32.48/include/media/v4l2-device.h 2011-11-08 19:02:43.000000000 -0500
65144+++ linux-2.6.32.48/include/media/v4l2-device.h 2011-11-15 19:59:43.000000000 -0500
65145@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
65146 this function returns 0. If the name ends with a digit (e.g. cx18),
65147 then the name will be set to cx18-0 since cx180 looks really odd. */
65148 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
65149- atomic_t *instance);
65150+ atomic_unchecked_t *instance);
65151
65152 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
65153 Since the parent disappears this ensures that v4l2_dev doesn't have an
65154diff -urNp linux-2.6.32.48/include/media/v4l2-ioctl.h linux-2.6.32.48/include/media/v4l2-ioctl.h
65155--- linux-2.6.32.48/include/media/v4l2-ioctl.h 2011-11-08 19:02:43.000000000 -0500
65156+++ linux-2.6.32.48/include/media/v4l2-ioctl.h 2011-11-15 19:59:43.000000000 -0500
65157@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
65158 long (*vidioc_default) (struct file *file, void *fh,
65159 int cmd, void *arg);
65160 };
65161+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
65162
65163
65164 /* v4l debugging and diagnostics */
65165diff -urNp linux-2.6.32.48/include/net/flow.h linux-2.6.32.48/include/net/flow.h
65166--- linux-2.6.32.48/include/net/flow.h 2011-11-08 19:02:43.000000000 -0500
65167+++ linux-2.6.32.48/include/net/flow.h 2011-11-15 19:59:43.000000000 -0500
65168@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
65169 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
65170 u8 dir, flow_resolve_t resolver);
65171 extern void flow_cache_flush(void);
65172-extern atomic_t flow_cache_genid;
65173+extern atomic_unchecked_t flow_cache_genid;
65174
65175 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
65176 {
65177diff -urNp linux-2.6.32.48/include/net/inetpeer.h linux-2.6.32.48/include/net/inetpeer.h
65178--- linux-2.6.32.48/include/net/inetpeer.h 2011-11-08 19:02:43.000000000 -0500
65179+++ linux-2.6.32.48/include/net/inetpeer.h 2011-11-15 19:59:43.000000000 -0500
65180@@ -24,7 +24,7 @@ struct inet_peer
65181 __u32 dtime; /* the time of last use of not
65182 * referenced entries */
65183 atomic_t refcnt;
65184- atomic_t rid; /* Frag reception counter */
65185+ atomic_unchecked_t rid; /* Frag reception counter */
65186 __u32 tcp_ts;
65187 unsigned long tcp_ts_stamp;
65188 };
65189diff -urNp linux-2.6.32.48/include/net/ip_vs.h linux-2.6.32.48/include/net/ip_vs.h
65190--- linux-2.6.32.48/include/net/ip_vs.h 2011-11-08 19:02:43.000000000 -0500
65191+++ linux-2.6.32.48/include/net/ip_vs.h 2011-11-15 19:59:43.000000000 -0500
65192@@ -365,7 +365,7 @@ struct ip_vs_conn {
65193 struct ip_vs_conn *control; /* Master control connection */
65194 atomic_t n_control; /* Number of controlled ones */
65195 struct ip_vs_dest *dest; /* real server */
65196- atomic_t in_pkts; /* incoming packet counter */
65197+ atomic_unchecked_t in_pkts; /* incoming packet counter */
65198
65199 /* packet transmitter for different forwarding methods. If it
65200 mangles the packet, it must return NF_DROP or better NF_STOLEN,
65201@@ -466,7 +466,7 @@ struct ip_vs_dest {
65202 union nf_inet_addr addr; /* IP address of the server */
65203 __be16 port; /* port number of the server */
65204 volatile unsigned flags; /* dest status flags */
65205- atomic_t conn_flags; /* flags to copy to conn */
65206+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
65207 atomic_t weight; /* server weight */
65208
65209 atomic_t refcnt; /* reference counter */
65210diff -urNp linux-2.6.32.48/include/net/irda/ircomm_core.h linux-2.6.32.48/include/net/irda/ircomm_core.h
65211--- linux-2.6.32.48/include/net/irda/ircomm_core.h 2011-11-08 19:02:43.000000000 -0500
65212+++ linux-2.6.32.48/include/net/irda/ircomm_core.h 2011-11-15 19:59:43.000000000 -0500
65213@@ -51,7 +51,7 @@ typedef struct {
65214 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
65215 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
65216 struct ircomm_info *);
65217-} call_t;
65218+} __no_const call_t;
65219
65220 struct ircomm_cb {
65221 irda_queue_t queue;
65222diff -urNp linux-2.6.32.48/include/net/irda/ircomm_tty.h linux-2.6.32.48/include/net/irda/ircomm_tty.h
65223--- linux-2.6.32.48/include/net/irda/ircomm_tty.h 2011-11-08 19:02:43.000000000 -0500
65224+++ linux-2.6.32.48/include/net/irda/ircomm_tty.h 2011-11-15 19:59:43.000000000 -0500
65225@@ -35,6 +35,7 @@
65226 #include <linux/termios.h>
65227 #include <linux/timer.h>
65228 #include <linux/tty.h> /* struct tty_struct */
65229+#include <asm/local.h>
65230
65231 #include <net/irda/irias_object.h>
65232 #include <net/irda/ircomm_core.h>
65233@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
65234 unsigned short close_delay;
65235 unsigned short closing_wait; /* time to wait before closing */
65236
65237- int open_count;
65238- int blocked_open; /* # of blocked opens */
65239+ local_t open_count;
65240+ local_t blocked_open; /* # of blocked opens */
65241
65242 /* Protect concurent access to :
65243 * o self->open_count
65244diff -urNp linux-2.6.32.48/include/net/iucv/af_iucv.h linux-2.6.32.48/include/net/iucv/af_iucv.h
65245--- linux-2.6.32.48/include/net/iucv/af_iucv.h 2011-11-08 19:02:43.000000000 -0500
65246+++ linux-2.6.32.48/include/net/iucv/af_iucv.h 2011-11-15 19:59:43.000000000 -0500
65247@@ -87,7 +87,7 @@ struct iucv_sock {
65248 struct iucv_sock_list {
65249 struct hlist_head head;
65250 rwlock_t lock;
65251- atomic_t autobind_name;
65252+ atomic_unchecked_t autobind_name;
65253 };
65254
65255 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
65256diff -urNp linux-2.6.32.48/include/net/lapb.h linux-2.6.32.48/include/net/lapb.h
65257--- linux-2.6.32.48/include/net/lapb.h 2011-11-08 19:02:43.000000000 -0500
65258+++ linux-2.6.32.48/include/net/lapb.h 2011-11-15 19:59:43.000000000 -0500
65259@@ -95,7 +95,7 @@ struct lapb_cb {
65260 struct sk_buff_head write_queue;
65261 struct sk_buff_head ack_queue;
65262 unsigned char window;
65263- struct lapb_register_struct callbacks;
65264+ struct lapb_register_struct *callbacks;
65265
65266 /* FRMR control information */
65267 struct lapb_frame frmr_data;
65268diff -urNp linux-2.6.32.48/include/net/neighbour.h linux-2.6.32.48/include/net/neighbour.h
65269--- linux-2.6.32.48/include/net/neighbour.h 2011-11-08 19:02:43.000000000 -0500
65270+++ linux-2.6.32.48/include/net/neighbour.h 2011-11-15 19:59:43.000000000 -0500
65271@@ -131,7 +131,7 @@ struct neigh_ops
65272 int (*connected_output)(struct sk_buff*);
65273 int (*hh_output)(struct sk_buff*);
65274 int (*queue_xmit)(struct sk_buff*);
65275-};
65276+} __do_const;
65277
65278 struct pneigh_entry
65279 {
65280diff -urNp linux-2.6.32.48/include/net/netlink.h linux-2.6.32.48/include/net/netlink.h
65281--- linux-2.6.32.48/include/net/netlink.h 2011-11-08 19:02:43.000000000 -0500
65282+++ linux-2.6.32.48/include/net/netlink.h 2011-11-15 19:59:43.000000000 -0500
65283@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
65284 {
65285 return (remaining >= (int) sizeof(struct nlmsghdr) &&
65286 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
65287- nlh->nlmsg_len <= remaining);
65288+ nlh->nlmsg_len <= (unsigned int)remaining);
65289 }
65290
65291 /**
65292@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
65293 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65294 {
65295 if (mark)
65296- skb_trim(skb, (unsigned char *) mark - skb->data);
65297+ skb_trim(skb, (const unsigned char *) mark - skb->data);
65298 }
65299
65300 /**
65301diff -urNp linux-2.6.32.48/include/net/netns/ipv4.h linux-2.6.32.48/include/net/netns/ipv4.h
65302--- linux-2.6.32.48/include/net/netns/ipv4.h 2011-11-08 19:02:43.000000000 -0500
65303+++ linux-2.6.32.48/include/net/netns/ipv4.h 2011-11-15 19:59:43.000000000 -0500
65304@@ -54,7 +54,7 @@ struct netns_ipv4 {
65305 int current_rt_cache_rebuild_count;
65306
65307 struct timer_list rt_secret_timer;
65308- atomic_t rt_genid;
65309+ atomic_unchecked_t rt_genid;
65310
65311 #ifdef CONFIG_IP_MROUTE
65312 struct sock *mroute_sk;
65313diff -urNp linux-2.6.32.48/include/net/sctp/sctp.h linux-2.6.32.48/include/net/sctp/sctp.h
65314--- linux-2.6.32.48/include/net/sctp/sctp.h 2011-11-08 19:02:43.000000000 -0500
65315+++ linux-2.6.32.48/include/net/sctp/sctp.h 2011-11-15 19:59:43.000000000 -0500
65316@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
65317
65318 #else /* SCTP_DEBUG */
65319
65320-#define SCTP_DEBUG_PRINTK(whatever...)
65321-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65322+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
65323+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65324 #define SCTP_ENABLE_DEBUG
65325 #define SCTP_DISABLE_DEBUG
65326 #define SCTP_ASSERT(expr, str, func)
65327diff -urNp linux-2.6.32.48/include/net/secure_seq.h linux-2.6.32.48/include/net/secure_seq.h
65328--- linux-2.6.32.48/include/net/secure_seq.h 2011-11-08 19:02:43.000000000 -0500
65329+++ linux-2.6.32.48/include/net/secure_seq.h 2011-11-15 19:59:43.000000000 -0500
65330@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
65331 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
65332 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
65333 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
65334- __be16 dport);
65335+ __be16 dport);
65336 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
65337 __be16 sport, __be16 dport);
65338 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
65339- __be16 sport, __be16 dport);
65340+ __be16 sport, __be16 dport);
65341 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
65342- __be16 sport, __be16 dport);
65343+ __be16 sport, __be16 dport);
65344 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
65345- __be16 sport, __be16 dport);
65346+ __be16 sport, __be16 dport);
65347
65348 #endif /* _NET_SECURE_SEQ */
65349diff -urNp linux-2.6.32.48/include/net/sock.h linux-2.6.32.48/include/net/sock.h
65350--- linux-2.6.32.48/include/net/sock.h 2011-11-08 19:02:43.000000000 -0500
65351+++ linux-2.6.32.48/include/net/sock.h 2011-11-15 19:59:43.000000000 -0500
65352@@ -272,7 +272,7 @@ struct sock {
65353 rwlock_t sk_callback_lock;
65354 int sk_err,
65355 sk_err_soft;
65356- atomic_t sk_drops;
65357+ atomic_unchecked_t sk_drops;
65358 unsigned short sk_ack_backlog;
65359 unsigned short sk_max_ack_backlog;
65360 __u32 sk_priority;
65361@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
65362 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
65363 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
65364 #else
65365-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
65366+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
65367 int inc)
65368 {
65369 }
65370diff -urNp linux-2.6.32.48/include/net/tcp.h linux-2.6.32.48/include/net/tcp.h
65371--- linux-2.6.32.48/include/net/tcp.h 2011-11-08 19:02:43.000000000 -0500
65372+++ linux-2.6.32.48/include/net/tcp.h 2011-11-15 19:59:43.000000000 -0500
65373@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
65374 struct tcp_seq_afinfo {
65375 char *name;
65376 sa_family_t family;
65377- struct file_operations seq_fops;
65378- struct seq_operations seq_ops;
65379+ file_operations_no_const seq_fops;
65380+ seq_operations_no_const seq_ops;
65381 };
65382
65383 struct tcp_iter_state {
65384diff -urNp linux-2.6.32.48/include/net/udp.h linux-2.6.32.48/include/net/udp.h
65385--- linux-2.6.32.48/include/net/udp.h 2011-11-08 19:02:43.000000000 -0500
65386+++ linux-2.6.32.48/include/net/udp.h 2011-11-15 19:59:43.000000000 -0500
65387@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
65388 char *name;
65389 sa_family_t family;
65390 struct udp_table *udp_table;
65391- struct file_operations seq_fops;
65392- struct seq_operations seq_ops;
65393+ file_operations_no_const seq_fops;
65394+ seq_operations_no_const seq_ops;
65395 };
65396
65397 struct udp_iter_state {
65398diff -urNp linux-2.6.32.48/include/rdma/iw_cm.h linux-2.6.32.48/include/rdma/iw_cm.h
65399--- linux-2.6.32.48/include/rdma/iw_cm.h 2011-11-08 19:02:43.000000000 -0500
65400+++ linux-2.6.32.48/include/rdma/iw_cm.h 2011-11-15 19:59:43.000000000 -0500
65401@@ -129,7 +129,7 @@ struct iw_cm_verbs {
65402 int backlog);
65403
65404 int (*destroy_listen)(struct iw_cm_id *cm_id);
65405-};
65406+} __no_const;
65407
65408 /**
65409 * iw_create_cm_id - Create an IW CM identifier.
65410diff -urNp linux-2.6.32.48/include/scsi/libfc.h linux-2.6.32.48/include/scsi/libfc.h
65411--- linux-2.6.32.48/include/scsi/libfc.h 2011-11-08 19:02:43.000000000 -0500
65412+++ linux-2.6.32.48/include/scsi/libfc.h 2011-11-15 19:59:43.000000000 -0500
65413@@ -675,6 +675,7 @@ struct libfc_function_template {
65414 */
65415 void (*disc_stop_final) (struct fc_lport *);
65416 };
65417+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65418
65419 /* information used by the discovery layer */
65420 struct fc_disc {
65421@@ -707,7 +708,7 @@ struct fc_lport {
65422 struct fc_disc disc;
65423
65424 /* Operational Information */
65425- struct libfc_function_template tt;
65426+ libfc_function_template_no_const tt;
65427 u8 link_up;
65428 u8 qfull;
65429 enum fc_lport_state state;
65430diff -urNp linux-2.6.32.48/include/scsi/scsi_device.h linux-2.6.32.48/include/scsi/scsi_device.h
65431--- linux-2.6.32.48/include/scsi/scsi_device.h 2011-11-08 19:02:43.000000000 -0500
65432+++ linux-2.6.32.48/include/scsi/scsi_device.h 2011-11-15 19:59:43.000000000 -0500
65433@@ -156,9 +156,9 @@ struct scsi_device {
65434 unsigned int max_device_blocked; /* what device_blocked counts down from */
65435 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65436
65437- atomic_t iorequest_cnt;
65438- atomic_t iodone_cnt;
65439- atomic_t ioerr_cnt;
65440+ atomic_unchecked_t iorequest_cnt;
65441+ atomic_unchecked_t iodone_cnt;
65442+ atomic_unchecked_t ioerr_cnt;
65443
65444 struct device sdev_gendev,
65445 sdev_dev;
65446diff -urNp linux-2.6.32.48/include/scsi/scsi_transport_fc.h linux-2.6.32.48/include/scsi/scsi_transport_fc.h
65447--- linux-2.6.32.48/include/scsi/scsi_transport_fc.h 2011-11-08 19:02:43.000000000 -0500
65448+++ linux-2.6.32.48/include/scsi/scsi_transport_fc.h 2011-11-15 19:59:43.000000000 -0500
65449@@ -708,7 +708,7 @@ struct fc_function_template {
65450 unsigned long show_host_system_hostname:1;
65451
65452 unsigned long disable_target_scan:1;
65453-};
65454+} __do_const;
65455
65456
65457 /**
65458diff -urNp linux-2.6.32.48/include/sound/ac97_codec.h linux-2.6.32.48/include/sound/ac97_codec.h
65459--- linux-2.6.32.48/include/sound/ac97_codec.h 2011-11-08 19:02:43.000000000 -0500
65460+++ linux-2.6.32.48/include/sound/ac97_codec.h 2011-11-15 19:59:43.000000000 -0500
65461@@ -419,15 +419,15 @@
65462 struct snd_ac97;
65463
65464 struct snd_ac97_build_ops {
65465- int (*build_3d) (struct snd_ac97 *ac97);
65466- int (*build_specific) (struct snd_ac97 *ac97);
65467- int (*build_spdif) (struct snd_ac97 *ac97);
65468- int (*build_post_spdif) (struct snd_ac97 *ac97);
65469+ int (* const build_3d) (struct snd_ac97 *ac97);
65470+ int (* const build_specific) (struct snd_ac97 *ac97);
65471+ int (* const build_spdif) (struct snd_ac97 *ac97);
65472+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
65473 #ifdef CONFIG_PM
65474- void (*suspend) (struct snd_ac97 *ac97);
65475- void (*resume) (struct snd_ac97 *ac97);
65476+ void (* const suspend) (struct snd_ac97 *ac97);
65477+ void (* const resume) (struct snd_ac97 *ac97);
65478 #endif
65479- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
65480+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
65481 };
65482
65483 struct snd_ac97_bus_ops {
65484@@ -477,7 +477,7 @@ struct snd_ac97_template {
65485
65486 struct snd_ac97 {
65487 /* -- lowlevel (hardware) driver specific -- */
65488- struct snd_ac97_build_ops * build_ops;
65489+ const struct snd_ac97_build_ops * build_ops;
65490 void *private_data;
65491 void (*private_free) (struct snd_ac97 *ac97);
65492 /* --- */
65493diff -urNp linux-2.6.32.48/include/sound/ak4xxx-adda.h linux-2.6.32.48/include/sound/ak4xxx-adda.h
65494--- linux-2.6.32.48/include/sound/ak4xxx-adda.h 2011-11-08 19:02:43.000000000 -0500
65495+++ linux-2.6.32.48/include/sound/ak4xxx-adda.h 2011-11-15 19:59:43.000000000 -0500
65496@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65497 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65498 unsigned char val);
65499 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65500-};
65501+} __no_const;
65502
65503 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65504
65505diff -urNp linux-2.6.32.48/include/sound/hwdep.h linux-2.6.32.48/include/sound/hwdep.h
65506--- linux-2.6.32.48/include/sound/hwdep.h 2011-11-08 19:02:43.000000000 -0500
65507+++ linux-2.6.32.48/include/sound/hwdep.h 2011-11-15 19:59:43.000000000 -0500
65508@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65509 struct snd_hwdep_dsp_status *status);
65510 int (*dsp_load)(struct snd_hwdep *hw,
65511 struct snd_hwdep_dsp_image *image);
65512-};
65513+} __no_const;
65514
65515 struct snd_hwdep {
65516 struct snd_card *card;
65517diff -urNp linux-2.6.32.48/include/sound/info.h linux-2.6.32.48/include/sound/info.h
65518--- linux-2.6.32.48/include/sound/info.h 2011-11-08 19:02:43.000000000 -0500
65519+++ linux-2.6.32.48/include/sound/info.h 2011-11-15 19:59:43.000000000 -0500
65520@@ -44,7 +44,7 @@ struct snd_info_entry_text {
65521 struct snd_info_buffer *buffer);
65522 void (*write)(struct snd_info_entry *entry,
65523 struct snd_info_buffer *buffer);
65524-};
65525+} __no_const;
65526
65527 struct snd_info_entry_ops {
65528 int (*open)(struct snd_info_entry *entry,
65529diff -urNp linux-2.6.32.48/include/sound/pcm.h linux-2.6.32.48/include/sound/pcm.h
65530--- linux-2.6.32.48/include/sound/pcm.h 2011-11-08 19:02:43.000000000 -0500
65531+++ linux-2.6.32.48/include/sound/pcm.h 2011-11-15 19:59:43.000000000 -0500
65532@@ -80,6 +80,7 @@ struct snd_pcm_ops {
65533 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65534 int (*ack)(struct snd_pcm_substream *substream);
65535 };
65536+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65537
65538 /*
65539 *
65540diff -urNp linux-2.6.32.48/include/sound/sb16_csp.h linux-2.6.32.48/include/sound/sb16_csp.h
65541--- linux-2.6.32.48/include/sound/sb16_csp.h 2011-11-08 19:02:43.000000000 -0500
65542+++ linux-2.6.32.48/include/sound/sb16_csp.h 2011-11-15 19:59:43.000000000 -0500
65543@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
65544 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65545 int (*csp_stop) (struct snd_sb_csp * p);
65546 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65547-};
65548+} __no_const;
65549
65550 /*
65551 * CSP private data
65552diff -urNp linux-2.6.32.48/include/sound/ymfpci.h linux-2.6.32.48/include/sound/ymfpci.h
65553--- linux-2.6.32.48/include/sound/ymfpci.h 2011-11-08 19:02:43.000000000 -0500
65554+++ linux-2.6.32.48/include/sound/ymfpci.h 2011-11-15 19:59:43.000000000 -0500
65555@@ -358,7 +358,7 @@ struct snd_ymfpci {
65556 spinlock_t reg_lock;
65557 spinlock_t voice_lock;
65558 wait_queue_head_t interrupt_sleep;
65559- atomic_t interrupt_sleep_count;
65560+ atomic_unchecked_t interrupt_sleep_count;
65561 struct snd_info_entry *proc_entry;
65562 const struct firmware *dsp_microcode;
65563 const struct firmware *controller_microcode;
65564diff -urNp linux-2.6.32.48/include/trace/events/irq.h linux-2.6.32.48/include/trace/events/irq.h
65565--- linux-2.6.32.48/include/trace/events/irq.h 2011-11-08 19:02:43.000000000 -0500
65566+++ linux-2.6.32.48/include/trace/events/irq.h 2011-11-15 19:59:43.000000000 -0500
65567@@ -34,7 +34,7 @@
65568 */
65569 TRACE_EVENT(irq_handler_entry,
65570
65571- TP_PROTO(int irq, struct irqaction *action),
65572+ TP_PROTO(int irq, const struct irqaction *action),
65573
65574 TP_ARGS(irq, action),
65575
65576@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
65577 */
65578 TRACE_EVENT(irq_handler_exit,
65579
65580- TP_PROTO(int irq, struct irqaction *action, int ret),
65581+ TP_PROTO(int irq, const struct irqaction *action, int ret),
65582
65583 TP_ARGS(irq, action, ret),
65584
65585@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
65586 */
65587 TRACE_EVENT(softirq_entry,
65588
65589- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
65590+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
65591
65592 TP_ARGS(h, vec),
65593
65594@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
65595 */
65596 TRACE_EVENT(softirq_exit,
65597
65598- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
65599+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
65600
65601 TP_ARGS(h, vec),
65602
65603diff -urNp linux-2.6.32.48/include/video/uvesafb.h linux-2.6.32.48/include/video/uvesafb.h
65604--- linux-2.6.32.48/include/video/uvesafb.h 2011-11-08 19:02:43.000000000 -0500
65605+++ linux-2.6.32.48/include/video/uvesafb.h 2011-11-15 19:59:43.000000000 -0500
65606@@ -177,6 +177,7 @@ struct uvesafb_par {
65607 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65608 u8 pmi_setpal; /* PMI for palette changes */
65609 u16 *pmi_base; /* protected mode interface location */
65610+ u8 *pmi_code; /* protected mode code location */
65611 void *pmi_start;
65612 void *pmi_pal;
65613 u8 *vbe_state_orig; /*
65614diff -urNp linux-2.6.32.48/init/do_mounts.c linux-2.6.32.48/init/do_mounts.c
65615--- linux-2.6.32.48/init/do_mounts.c 2011-11-08 19:02:43.000000000 -0500
65616+++ linux-2.6.32.48/init/do_mounts.c 2011-11-15 19:59:43.000000000 -0500
65617@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
65618
65619 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65620 {
65621- int err = sys_mount(name, "/root", fs, flags, data);
65622+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
65623 if (err)
65624 return err;
65625
65626- sys_chdir("/root");
65627+ sys_chdir((__force const char __user *)"/root");
65628 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
65629 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
65630 current->fs->pwd.mnt->mnt_sb->s_type->name,
65631@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
65632 va_start(args, fmt);
65633 vsprintf(buf, fmt, args);
65634 va_end(args);
65635- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65636+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65637 if (fd >= 0) {
65638 sys_ioctl(fd, FDEJECT, 0);
65639 sys_close(fd);
65640 }
65641 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65642- fd = sys_open("/dev/console", O_RDWR, 0);
65643+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
65644 if (fd >= 0) {
65645 sys_ioctl(fd, TCGETS, (long)&termios);
65646 termios.c_lflag &= ~ICANON;
65647 sys_ioctl(fd, TCSETSF, (long)&termios);
65648- sys_read(fd, &c, 1);
65649+ sys_read(fd, (char __user *)&c, 1);
65650 termios.c_lflag |= ICANON;
65651 sys_ioctl(fd, TCSETSF, (long)&termios);
65652 sys_close(fd);
65653@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
65654 mount_root();
65655 out:
65656 devtmpfs_mount("dev");
65657- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65658- sys_chroot(".");
65659+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
65660+ sys_chroot((__force char __user *)".");
65661 }
65662diff -urNp linux-2.6.32.48/init/do_mounts.h linux-2.6.32.48/init/do_mounts.h
65663--- linux-2.6.32.48/init/do_mounts.h 2011-11-08 19:02:43.000000000 -0500
65664+++ linux-2.6.32.48/init/do_mounts.h 2011-11-15 19:59:43.000000000 -0500
65665@@ -15,15 +15,15 @@ extern int root_mountflags;
65666
65667 static inline int create_dev(char *name, dev_t dev)
65668 {
65669- sys_unlink(name);
65670- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65671+ sys_unlink((char __force_user *)name);
65672+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65673 }
65674
65675 #if BITS_PER_LONG == 32
65676 static inline u32 bstat(char *name)
65677 {
65678 struct stat64 stat;
65679- if (sys_stat64(name, &stat) != 0)
65680+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65681 return 0;
65682 if (!S_ISBLK(stat.st_mode))
65683 return 0;
65684@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65685 static inline u32 bstat(char *name)
65686 {
65687 struct stat stat;
65688- if (sys_newstat(name, &stat) != 0)
65689+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65690 return 0;
65691 if (!S_ISBLK(stat.st_mode))
65692 return 0;
65693diff -urNp linux-2.6.32.48/init/do_mounts_initrd.c linux-2.6.32.48/init/do_mounts_initrd.c
65694--- linux-2.6.32.48/init/do_mounts_initrd.c 2011-11-08 19:02:43.000000000 -0500
65695+++ linux-2.6.32.48/init/do_mounts_initrd.c 2011-11-15 19:59:43.000000000 -0500
65696@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
65697 sys_close(old_fd);sys_close(root_fd);
65698 sys_close(0);sys_close(1);sys_close(2);
65699 sys_setsid();
65700- (void) sys_open("/dev/console",O_RDWR,0);
65701+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
65702 (void) sys_dup(0);
65703 (void) sys_dup(0);
65704 return kernel_execve(shell, argv, envp_init);
65705@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
65706 create_dev("/dev/root.old", Root_RAM0);
65707 /* mount initrd on rootfs' /root */
65708 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65709- sys_mkdir("/old", 0700);
65710- root_fd = sys_open("/", 0, 0);
65711- old_fd = sys_open("/old", 0, 0);
65712+ sys_mkdir((const char __force_user *)"/old", 0700);
65713+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
65714+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65715 /* move initrd over / and chdir/chroot in initrd root */
65716- sys_chdir("/root");
65717- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65718- sys_chroot(".");
65719+ sys_chdir((const char __force_user *)"/root");
65720+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65721+ sys_chroot((const char __force_user *)".");
65722
65723 /*
65724 * In case that a resume from disk is carried out by linuxrc or one of
65725@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
65726
65727 /* move initrd to rootfs' /old */
65728 sys_fchdir(old_fd);
65729- sys_mount("/", ".", NULL, MS_MOVE, NULL);
65730+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65731 /* switch root and cwd back to / of rootfs */
65732 sys_fchdir(root_fd);
65733- sys_chroot(".");
65734+ sys_chroot((const char __force_user *)".");
65735 sys_close(old_fd);
65736 sys_close(root_fd);
65737
65738 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65739- sys_chdir("/old");
65740+ sys_chdir((const char __force_user *)"/old");
65741 return;
65742 }
65743
65744@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
65745 mount_root();
65746
65747 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65748- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65749+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65750 if (!error)
65751 printk("okay\n");
65752 else {
65753- int fd = sys_open("/dev/root.old", O_RDWR, 0);
65754+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65755 if (error == -ENOENT)
65756 printk("/initrd does not exist. Ignored.\n");
65757 else
65758 printk("failed\n");
65759 printk(KERN_NOTICE "Unmounting old root\n");
65760- sys_umount("/old", MNT_DETACH);
65761+ sys_umount((char __force_user *)"/old", MNT_DETACH);
65762 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65763 if (fd < 0) {
65764 error = fd;
65765@@ -119,11 +119,11 @@ int __init initrd_load(void)
65766 * mounted in the normal path.
65767 */
65768 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65769- sys_unlink("/initrd.image");
65770+ sys_unlink((const char __force_user *)"/initrd.image");
65771 handle_initrd();
65772 return 1;
65773 }
65774 }
65775- sys_unlink("/initrd.image");
65776+ sys_unlink((const char __force_user *)"/initrd.image");
65777 return 0;
65778 }
65779diff -urNp linux-2.6.32.48/init/do_mounts_md.c linux-2.6.32.48/init/do_mounts_md.c
65780--- linux-2.6.32.48/init/do_mounts_md.c 2011-11-08 19:02:43.000000000 -0500
65781+++ linux-2.6.32.48/init/do_mounts_md.c 2011-11-15 19:59:43.000000000 -0500
65782@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
65783 partitioned ? "_d" : "", minor,
65784 md_setup_args[ent].device_names);
65785
65786- fd = sys_open(name, 0, 0);
65787+ fd = sys_open((char __force_user *)name, 0, 0);
65788 if (fd < 0) {
65789 printk(KERN_ERR "md: open failed - cannot start "
65790 "array %s\n", name);
65791@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
65792 * array without it
65793 */
65794 sys_close(fd);
65795- fd = sys_open(name, 0, 0);
65796+ fd = sys_open((char __force_user *)name, 0, 0);
65797 sys_ioctl(fd, BLKRRPART, 0);
65798 }
65799 sys_close(fd);
65800@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
65801
65802 wait_for_device_probe();
65803
65804- fd = sys_open("/dev/md0", 0, 0);
65805+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
65806 if (fd >= 0) {
65807 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65808 sys_close(fd);
65809diff -urNp linux-2.6.32.48/init/initramfs.c linux-2.6.32.48/init/initramfs.c
65810--- linux-2.6.32.48/init/initramfs.c 2011-11-08 19:02:43.000000000 -0500
65811+++ linux-2.6.32.48/init/initramfs.c 2011-11-15 19:59:43.000000000 -0500
65812@@ -74,7 +74,7 @@ static void __init free_hash(void)
65813 }
65814 }
65815
65816-static long __init do_utime(char __user *filename, time_t mtime)
65817+static long __init do_utime(__force char __user *filename, time_t mtime)
65818 {
65819 struct timespec t[2];
65820
65821@@ -109,7 +109,7 @@ static void __init dir_utime(void)
65822 struct dir_entry *de, *tmp;
65823 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65824 list_del(&de->list);
65825- do_utime(de->name, de->mtime);
65826+ do_utime((char __force_user *)de->name, de->mtime);
65827 kfree(de->name);
65828 kfree(de);
65829 }
65830@@ -271,7 +271,7 @@ static int __init maybe_link(void)
65831 if (nlink >= 2) {
65832 char *old = find_link(major, minor, ino, mode, collected);
65833 if (old)
65834- return (sys_link(old, collected) < 0) ? -1 : 1;
65835+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
65836 }
65837 return 0;
65838 }
65839@@ -280,11 +280,11 @@ static void __init clean_path(char *path
65840 {
65841 struct stat st;
65842
65843- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
65844+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
65845 if (S_ISDIR(st.st_mode))
65846- sys_rmdir(path);
65847+ sys_rmdir((char __force_user *)path);
65848 else
65849- sys_unlink(path);
65850+ sys_unlink((char __force_user *)path);
65851 }
65852 }
65853
65854@@ -305,7 +305,7 @@ static int __init do_name(void)
65855 int openflags = O_WRONLY|O_CREAT;
65856 if (ml != 1)
65857 openflags |= O_TRUNC;
65858- wfd = sys_open(collected, openflags, mode);
65859+ wfd = sys_open((char __force_user *)collected, openflags, mode);
65860
65861 if (wfd >= 0) {
65862 sys_fchown(wfd, uid, gid);
65863@@ -317,17 +317,17 @@ static int __init do_name(void)
65864 }
65865 }
65866 } else if (S_ISDIR(mode)) {
65867- sys_mkdir(collected, mode);
65868- sys_chown(collected, uid, gid);
65869- sys_chmod(collected, mode);
65870+ sys_mkdir((char __force_user *)collected, mode);
65871+ sys_chown((char __force_user *)collected, uid, gid);
65872+ sys_chmod((char __force_user *)collected, mode);
65873 dir_add(collected, mtime);
65874 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65875 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65876 if (maybe_link() == 0) {
65877- sys_mknod(collected, mode, rdev);
65878- sys_chown(collected, uid, gid);
65879- sys_chmod(collected, mode);
65880- do_utime(collected, mtime);
65881+ sys_mknod((char __force_user *)collected, mode, rdev);
65882+ sys_chown((char __force_user *)collected, uid, gid);
65883+ sys_chmod((char __force_user *)collected, mode);
65884+ do_utime((char __force_user *)collected, mtime);
65885 }
65886 }
65887 return 0;
65888@@ -336,15 +336,15 @@ static int __init do_name(void)
65889 static int __init do_copy(void)
65890 {
65891 if (count >= body_len) {
65892- sys_write(wfd, victim, body_len);
65893+ sys_write(wfd, (char __force_user *)victim, body_len);
65894 sys_close(wfd);
65895- do_utime(vcollected, mtime);
65896+ do_utime((char __force_user *)vcollected, mtime);
65897 kfree(vcollected);
65898 eat(body_len);
65899 state = SkipIt;
65900 return 0;
65901 } else {
65902- sys_write(wfd, victim, count);
65903+ sys_write(wfd, (char __force_user *)victim, count);
65904 body_len -= count;
65905 eat(count);
65906 return 1;
65907@@ -355,9 +355,9 @@ static int __init do_symlink(void)
65908 {
65909 collected[N_ALIGN(name_len) + body_len] = '\0';
65910 clean_path(collected, 0);
65911- sys_symlink(collected + N_ALIGN(name_len), collected);
65912- sys_lchown(collected, uid, gid);
65913- do_utime(collected, mtime);
65914+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65915+ sys_lchown((char __force_user *)collected, uid, gid);
65916+ do_utime((char __force_user *)collected, mtime);
65917 state = SkipIt;
65918 next_state = Reset;
65919 return 0;
65920diff -urNp linux-2.6.32.48/init/Kconfig linux-2.6.32.48/init/Kconfig
65921--- linux-2.6.32.48/init/Kconfig 2011-11-08 19:02:43.000000000 -0500
65922+++ linux-2.6.32.48/init/Kconfig 2011-11-15 19:59:43.000000000 -0500
65923@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
65924
65925 config COMPAT_BRK
65926 bool "Disable heap randomization"
65927- default y
65928+ default n
65929 help
65930 Randomizing heap placement makes heap exploits harder, but it
65931 also breaks ancient binaries (including anything libc5 based).
65932diff -urNp linux-2.6.32.48/init/main.c linux-2.6.32.48/init/main.c
65933--- linux-2.6.32.48/init/main.c 2011-11-08 19:02:43.000000000 -0500
65934+++ linux-2.6.32.48/init/main.c 2011-11-15 19:59:43.000000000 -0500
65935@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
65936 #ifdef CONFIG_TC
65937 extern void tc_init(void);
65938 #endif
65939+extern void grsecurity_init(void);
65940
65941 enum system_states system_state __read_mostly;
65942 EXPORT_SYMBOL(system_state);
65943@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
65944
65945 __setup("reset_devices", set_reset_devices);
65946
65947+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65948+extern char pax_enter_kernel_user[];
65949+extern char pax_exit_kernel_user[];
65950+extern pgdval_t clone_pgd_mask;
65951+#endif
65952+
65953+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65954+static int __init setup_pax_nouderef(char *str)
65955+{
65956+#ifdef CONFIG_X86_32
65957+ unsigned int cpu;
65958+ struct desc_struct *gdt;
65959+
65960+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
65961+ gdt = get_cpu_gdt_table(cpu);
65962+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65963+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65964+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65965+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65966+ }
65967+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65968+#else
65969+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65970+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65971+ clone_pgd_mask = ~(pgdval_t)0UL;
65972+#endif
65973+
65974+ return 0;
65975+}
65976+early_param("pax_nouderef", setup_pax_nouderef);
65977+#endif
65978+
65979+#ifdef CONFIG_PAX_SOFTMODE
65980+int pax_softmode;
65981+
65982+static int __init setup_pax_softmode(char *str)
65983+{
65984+ get_option(&str, &pax_softmode);
65985+ return 1;
65986+}
65987+__setup("pax_softmode=", setup_pax_softmode);
65988+#endif
65989+
65990 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65991 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65992 static const char *panic_later, *panic_param;
65993@@ -705,52 +749,53 @@ int initcall_debug;
65994 core_param(initcall_debug, initcall_debug, bool, 0644);
65995
65996 static char msgbuf[64];
65997-static struct boot_trace_call call;
65998-static struct boot_trace_ret ret;
65999+static struct boot_trace_call trace_call;
66000+static struct boot_trace_ret trace_ret;
66001
66002 int do_one_initcall(initcall_t fn)
66003 {
66004 int count = preempt_count();
66005 ktime_t calltime, delta, rettime;
66006+ const char *msg1 = "", *msg2 = "";
66007
66008 if (initcall_debug) {
66009- call.caller = task_pid_nr(current);
66010- printk("calling %pF @ %i\n", fn, call.caller);
66011+ trace_call.caller = task_pid_nr(current);
66012+ printk("calling %pF @ %i\n", fn, trace_call.caller);
66013 calltime = ktime_get();
66014- trace_boot_call(&call, fn);
66015+ trace_boot_call(&trace_call, fn);
66016 enable_boot_trace();
66017 }
66018
66019- ret.result = fn();
66020+ trace_ret.result = fn();
66021
66022 if (initcall_debug) {
66023 disable_boot_trace();
66024 rettime = ktime_get();
66025 delta = ktime_sub(rettime, calltime);
66026- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
66027- trace_boot_ret(&ret, fn);
66028+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
66029+ trace_boot_ret(&trace_ret, fn);
66030 printk("initcall %pF returned %d after %Ld usecs\n", fn,
66031- ret.result, ret.duration);
66032+ trace_ret.result, trace_ret.duration);
66033 }
66034
66035 msgbuf[0] = 0;
66036
66037- if (ret.result && ret.result != -ENODEV && initcall_debug)
66038- sprintf(msgbuf, "error code %d ", ret.result);
66039+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
66040+ sprintf(msgbuf, "error code %d ", trace_ret.result);
66041
66042 if (preempt_count() != count) {
66043- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
66044+ msg1 = " preemption imbalance";
66045 preempt_count() = count;
66046 }
66047 if (irqs_disabled()) {
66048- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
66049+ msg2 = " disabled interrupts";
66050 local_irq_enable();
66051 }
66052- if (msgbuf[0]) {
66053- printk("initcall %pF returned with %s\n", fn, msgbuf);
66054+ if (msgbuf[0] || *msg1 || *msg2) {
66055+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
66056 }
66057
66058- return ret.result;
66059+ return trace_ret.result;
66060 }
66061
66062
66063@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
66064 if (!ramdisk_execute_command)
66065 ramdisk_execute_command = "/init";
66066
66067- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
66068+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
66069 ramdisk_execute_command = NULL;
66070 prepare_namespace();
66071 }
66072
66073+ grsecurity_init();
66074+
66075 /*
66076 * Ok, we have completed the initial bootup, and
66077 * we're essentially up and running. Get rid of the
66078diff -urNp linux-2.6.32.48/init/noinitramfs.c linux-2.6.32.48/init/noinitramfs.c
66079--- linux-2.6.32.48/init/noinitramfs.c 2011-11-08 19:02:43.000000000 -0500
66080+++ linux-2.6.32.48/init/noinitramfs.c 2011-11-15 19:59:43.000000000 -0500
66081@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
66082 {
66083 int err;
66084
66085- err = sys_mkdir("/dev", 0755);
66086+ err = sys_mkdir((const char __user *)"/dev", 0755);
66087 if (err < 0)
66088 goto out;
66089
66090@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
66091 if (err < 0)
66092 goto out;
66093
66094- err = sys_mkdir("/root", 0700);
66095+ err = sys_mkdir((const char __user *)"/root", 0700);
66096 if (err < 0)
66097 goto out;
66098
66099diff -urNp linux-2.6.32.48/ipc/mqueue.c linux-2.6.32.48/ipc/mqueue.c
66100--- linux-2.6.32.48/ipc/mqueue.c 2011-11-08 19:02:43.000000000 -0500
66101+++ linux-2.6.32.48/ipc/mqueue.c 2011-11-15 19:59:43.000000000 -0500
66102@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
66103 mq_bytes = (mq_msg_tblsz +
66104 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
66105
66106+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66107 spin_lock(&mq_lock);
66108 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
66109 u->mq_bytes + mq_bytes >
66110diff -urNp linux-2.6.32.48/ipc/msg.c linux-2.6.32.48/ipc/msg.c
66111--- linux-2.6.32.48/ipc/msg.c 2011-11-08 19:02:43.000000000 -0500
66112+++ linux-2.6.32.48/ipc/msg.c 2011-11-15 19:59:43.000000000 -0500
66113@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
66114 return security_msg_queue_associate(msq, msgflg);
66115 }
66116
66117+static struct ipc_ops msg_ops = {
66118+ .getnew = newque,
66119+ .associate = msg_security,
66120+ .more_checks = NULL
66121+};
66122+
66123 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66124 {
66125 struct ipc_namespace *ns;
66126- struct ipc_ops msg_ops;
66127 struct ipc_params msg_params;
66128
66129 ns = current->nsproxy->ipc_ns;
66130
66131- msg_ops.getnew = newque;
66132- msg_ops.associate = msg_security;
66133- msg_ops.more_checks = NULL;
66134-
66135 msg_params.key = key;
66136 msg_params.flg = msgflg;
66137
66138diff -urNp linux-2.6.32.48/ipc/sem.c linux-2.6.32.48/ipc/sem.c
66139--- linux-2.6.32.48/ipc/sem.c 2011-11-08 19:02:43.000000000 -0500
66140+++ linux-2.6.32.48/ipc/sem.c 2011-11-15 19:59:43.000000000 -0500
66141@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
66142 return 0;
66143 }
66144
66145+static struct ipc_ops sem_ops = {
66146+ .getnew = newary,
66147+ .associate = sem_security,
66148+ .more_checks = sem_more_checks
66149+};
66150+
66151 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66152 {
66153 struct ipc_namespace *ns;
66154- struct ipc_ops sem_ops;
66155 struct ipc_params sem_params;
66156
66157 ns = current->nsproxy->ipc_ns;
66158@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
66159 if (nsems < 0 || nsems > ns->sc_semmsl)
66160 return -EINVAL;
66161
66162- sem_ops.getnew = newary;
66163- sem_ops.associate = sem_security;
66164- sem_ops.more_checks = sem_more_checks;
66165-
66166 sem_params.key = key;
66167 sem_params.flg = semflg;
66168 sem_params.u.nsems = nsems;
66169@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
66170 ushort* sem_io = fast_sem_io;
66171 int nsems;
66172
66173+ pax_track_stack();
66174+
66175 sma = sem_lock_check(ns, semid);
66176 if (IS_ERR(sma))
66177 return PTR_ERR(sma);
66178@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
66179 unsigned long jiffies_left = 0;
66180 struct ipc_namespace *ns;
66181
66182+ pax_track_stack();
66183+
66184 ns = current->nsproxy->ipc_ns;
66185
66186 if (nsops < 1 || semid < 0)
66187diff -urNp linux-2.6.32.48/ipc/shm.c linux-2.6.32.48/ipc/shm.c
66188--- linux-2.6.32.48/ipc/shm.c 2011-11-08 19:02:43.000000000 -0500
66189+++ linux-2.6.32.48/ipc/shm.c 2011-11-15 19:59:43.000000000 -0500
66190@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
66191 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66192 #endif
66193
66194+#ifdef CONFIG_GRKERNSEC
66195+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66196+ const time_t shm_createtime, const uid_t cuid,
66197+ const int shmid);
66198+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66199+ const time_t shm_createtime);
66200+#endif
66201+
66202 void shm_init_ns(struct ipc_namespace *ns)
66203 {
66204 ns->shm_ctlmax = SHMMAX;
66205@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
66206 shp->shm_lprid = 0;
66207 shp->shm_atim = shp->shm_dtim = 0;
66208 shp->shm_ctim = get_seconds();
66209+#ifdef CONFIG_GRKERNSEC
66210+ {
66211+ struct timespec timeval;
66212+ do_posix_clock_monotonic_gettime(&timeval);
66213+
66214+ shp->shm_createtime = timeval.tv_sec;
66215+ }
66216+#endif
66217 shp->shm_segsz = size;
66218 shp->shm_nattch = 0;
66219 shp->shm_file = file;
66220@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
66221 return 0;
66222 }
66223
66224+static struct ipc_ops shm_ops = {
66225+ .getnew = newseg,
66226+ .associate = shm_security,
66227+ .more_checks = shm_more_checks
66228+};
66229+
66230 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66231 {
66232 struct ipc_namespace *ns;
66233- struct ipc_ops shm_ops;
66234 struct ipc_params shm_params;
66235
66236 ns = current->nsproxy->ipc_ns;
66237
66238- shm_ops.getnew = newseg;
66239- shm_ops.associate = shm_security;
66240- shm_ops.more_checks = shm_more_checks;
66241-
66242 shm_params.key = key;
66243 shm_params.flg = shmflg;
66244 shm_params.u.size = size;
66245@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
66246 if (err)
66247 goto out_unlock;
66248
66249+#ifdef CONFIG_GRKERNSEC
66250+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66251+ shp->shm_perm.cuid, shmid) ||
66252+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66253+ err = -EACCES;
66254+ goto out_unlock;
66255+ }
66256+#endif
66257+
66258 path.dentry = dget(shp->shm_file->f_path.dentry);
66259 path.mnt = shp->shm_file->f_path.mnt;
66260 shp->shm_nattch++;
66261+#ifdef CONFIG_GRKERNSEC
66262+ shp->shm_lapid = current->pid;
66263+#endif
66264 size = i_size_read(path.dentry->d_inode);
66265 shm_unlock(shp);
66266
66267diff -urNp linux-2.6.32.48/kernel/acct.c linux-2.6.32.48/kernel/acct.c
66268--- linux-2.6.32.48/kernel/acct.c 2011-11-08 19:02:43.000000000 -0500
66269+++ linux-2.6.32.48/kernel/acct.c 2011-11-15 19:59:43.000000000 -0500
66270@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
66271 */
66272 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66273 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66274- file->f_op->write(file, (char *)&ac,
66275+ file->f_op->write(file, (char __force_user *)&ac,
66276 sizeof(acct_t), &file->f_pos);
66277 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66278 set_fs(fs);
66279diff -urNp linux-2.6.32.48/kernel/audit.c linux-2.6.32.48/kernel/audit.c
66280--- linux-2.6.32.48/kernel/audit.c 2011-11-08 19:02:43.000000000 -0500
66281+++ linux-2.6.32.48/kernel/audit.c 2011-11-15 19:59:43.000000000 -0500
66282@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
66283 3) suppressed due to audit_rate_limit
66284 4) suppressed due to audit_backlog_limit
66285 */
66286-static atomic_t audit_lost = ATOMIC_INIT(0);
66287+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66288
66289 /* The netlink socket. */
66290 static struct sock *audit_sock;
66291@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
66292 unsigned long now;
66293 int print;
66294
66295- atomic_inc(&audit_lost);
66296+ atomic_inc_unchecked(&audit_lost);
66297
66298 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66299
66300@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
66301 printk(KERN_WARNING
66302 "audit: audit_lost=%d audit_rate_limit=%d "
66303 "audit_backlog_limit=%d\n",
66304- atomic_read(&audit_lost),
66305+ atomic_read_unchecked(&audit_lost),
66306 audit_rate_limit,
66307 audit_backlog_limit);
66308 audit_panic(message);
66309@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
66310 status_set.pid = audit_pid;
66311 status_set.rate_limit = audit_rate_limit;
66312 status_set.backlog_limit = audit_backlog_limit;
66313- status_set.lost = atomic_read(&audit_lost);
66314+ status_set.lost = atomic_read_unchecked(&audit_lost);
66315 status_set.backlog = skb_queue_len(&audit_skb_queue);
66316 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66317 &status_set, sizeof(status_set));
66318@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
66319 spin_unlock_irq(&tsk->sighand->siglock);
66320 }
66321 read_unlock(&tasklist_lock);
66322- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
66323- &s, sizeof(s));
66324+
66325+ if (!err)
66326+ audit_send_reply(NETLINK_CB(skb).pid, seq,
66327+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
66328 break;
66329 }
66330 case AUDIT_TTY_SET: {
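
The audit_lost counter above is converted to atomic_unchecked_t because, under PaX's REFCOUNT hardening, plain atomic_t increments are instrumented to detect overflow; a lost-message statistic is allowed to wrap harmlessly, so it is opted out of that checking. The userspace illustration below uses GCC's overflow builtin to stand in for the hardened atomic; the kernel types and the actual REFCOUNT trap are not reproduced here.

/* Userspace illustration: a "checked" counter that refuses to wrap
 * (standing in for a REFCOUNT-hardened atomic_t) versus a plain
 * wrapping counter (standing in for atomic_unchecked_t). */
#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
	int next;

	/* __builtin_add_overflow() (GCC 5+/clang) reports a would-be wrap */
	if (__builtin_add_overflow(*v, 1, &next))
		return -1;	/* the hardened kernel would trap instead */
	*v = next;
	return 0;
}

static void unchecked_inc(unsigned int *v)
{
	(*v)++;			/* wrap-around is deliberate and harmless */
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int audit_lost = UINT_MAX;

	if (checked_inc(&refcount))
		printf("checked counter: overflow detected at %d\n", refcount);

	unchecked_inc(&audit_lost);
	printf("unchecked statistic wrapped to %u\n", audit_lost);
	return 0;
}
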
66331diff -urNp linux-2.6.32.48/kernel/auditsc.c linux-2.6.32.48/kernel/auditsc.c
66332--- linux-2.6.32.48/kernel/auditsc.c 2011-11-08 19:02:43.000000000 -0500
66333+++ linux-2.6.32.48/kernel/auditsc.c 2011-11-15 19:59:43.000000000 -0500
66334@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
66335 }
66336
66337 /* global counter which is incremented every time something logs in */
66338-static atomic_t session_id = ATOMIC_INIT(0);
66339+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66340
66341 /**
66342 * audit_set_loginuid - set a task's audit_context loginuid
66343@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
66344 */
66345 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
66346 {
66347- unsigned int sessionid = atomic_inc_return(&session_id);
66348+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
66349 struct audit_context *context = task->audit_context;
66350
66351 if (context && context->in_syscall) {
66352diff -urNp linux-2.6.32.48/kernel/capability.c linux-2.6.32.48/kernel/capability.c
66353--- linux-2.6.32.48/kernel/capability.c 2011-11-08 19:02:43.000000000 -0500
66354+++ linux-2.6.32.48/kernel/capability.c 2011-11-15 19:59:43.000000000 -0500
66355@@ -305,10 +305,26 @@ int capable(int cap)
66356 BUG();
66357 }
66358
66359- if (security_capable(cap) == 0) {
66360+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
66361 current->flags |= PF_SUPERPRIV;
66362 return 1;
66363 }
66364 return 0;
66365 }
66366+
66367+int capable_nolog(int cap)
66368+{
66369+ if (unlikely(!cap_valid(cap))) {
66370+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66371+ BUG();
66372+ }
66373+
66374+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
66375+ current->flags |= PF_SUPERPRIV;
66376+ return 1;
66377+ }
66378+ return 0;
66379+}
66380+
66381 EXPORT_SYMBOL(capable);
66382+EXPORT_SYMBOL(capable_nolog);
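
capable_nolog() above duplicates capable() except that it consults gr_is_capable_nolog(), so the grsecurity capability check runs without emitting a capability-use log entry; both variants still set PF_SUPERPRIV on success. A compact userspace model of that split follows; security_capable() and the gr_is_capable*() hooks are stubbed out and are not the real kernel or grsecurity implementations.

/* Userspace model of the capable() / capable_nolog() split added above.
 * All policy bodies are stubs; only the control flow is illustrated. */
#include <stdio.h>

#define PF_SUPERPRIV 0x00000100
static unsigned int current_flags;

static int security_capable(int cap)    { (void)cap; return 0; }	/* 0 = allowed */
static int gr_is_capable(int cap)       { printf("log: cap %d used\n", cap); return 1; }
static int gr_is_capable_nolog(int cap) { (void)cap; return 1; }	/* same check, no log */

static int model_capable(int cap)
{
	if (security_capable(cap) == 0 && gr_is_capable(cap)) {
		current_flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

static int model_capable_nolog(int cap)
{
	if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
		current_flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("capable: %d\n", model_capable(21));		/* logs the use */
	printf("capable_nolog: %d\n", model_capable_nolog(21));	/* silent */
	return 0;
}
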
66383diff -urNp linux-2.6.32.48/kernel/cgroup.c linux-2.6.32.48/kernel/cgroup.c
66384--- linux-2.6.32.48/kernel/cgroup.c 2011-11-08 19:02:43.000000000 -0500
66385+++ linux-2.6.32.48/kernel/cgroup.c 2011-11-15 19:59:43.000000000 -0500
66386@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
66387 struct hlist_head *hhead;
66388 struct cg_cgroup_link *link;
66389
66390+ pax_track_stack();
66391+
66392 /* First see if we already have a cgroup group that matches
66393 * the desired set */
66394 read_lock(&css_set_lock);
66395diff -urNp linux-2.6.32.48/kernel/compat.c linux-2.6.32.48/kernel/compat.c
66396--- linux-2.6.32.48/kernel/compat.c 2011-11-08 19:02:43.000000000 -0500
66397+++ linux-2.6.32.48/kernel/compat.c 2011-11-15 19:59:43.000000000 -0500
66398@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(str
66399 mm_segment_t oldfs;
66400 long ret;
66401
66402- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66403+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66404 oldfs = get_fs();
66405 set_fs(KERNEL_DS);
66406 ret = hrtimer_nanosleep_restart(restart);
66407@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(str
66408 oldfs = get_fs();
66409 set_fs(KERNEL_DS);
66410 ret = hrtimer_nanosleep(&tu,
66411- rmtp ? (struct timespec __user *)&rmt : NULL,
66412+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
66413 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66414 set_fs(oldfs);
66415
66416@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(co
66417 mm_segment_t old_fs = get_fs();
66418
66419 set_fs(KERNEL_DS);
66420- ret = sys_sigpending((old_sigset_t __user *) &s);
66421+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
66422 set_fs(old_fs);
66423 if (ret == 0)
66424 ret = put_user(s, set);
66425@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(i
66426 old_fs = get_fs();
66427 set_fs(KERNEL_DS);
66428 ret = sys_sigprocmask(how,
66429- set ? (old_sigset_t __user *) &s : NULL,
66430- oset ? (old_sigset_t __user *) &s : NULL);
66431+ set ? (old_sigset_t __force_user *) &s : NULL,
66432+ oset ? (old_sigset_t __force_user *) &s : NULL);
66433 set_fs(old_fs);
66434 if (ret == 0)
66435 if (oset)
66436@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit
66437 mm_segment_t old_fs = get_fs();
66438
66439 set_fs(KERNEL_DS);
66440- ret = sys_old_getrlimit(resource, &r);
66441+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66442 set_fs(old_fs);
66443
66444 if (!ret) {
66445@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int
66446 mm_segment_t old_fs = get_fs();
66447
66448 set_fs(KERNEL_DS);
66449- ret = sys_getrusage(who, (struct rusage __user *) &r);
66450+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66451 set_fs(old_fs);
66452
66453 if (ret)
66454@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compa
66455 set_fs (KERNEL_DS);
66456 ret = sys_wait4(pid,
66457 (stat_addr ?
66458- (unsigned int __user *) &status : NULL),
66459- options, (struct rusage __user *) &r);
66460+ (unsigned int __force_user *) &status : NULL),
66461+ options, (struct rusage __force_user *) &r);
66462 set_fs (old_fs);
66463
66464 if (ret > 0) {
66465@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int wh
66466 memset(&info, 0, sizeof(info));
66467
66468 set_fs(KERNEL_DS);
66469- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66470- uru ? (struct rusage __user *)&ru : NULL);
66471+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66472+ uru ? (struct rusage __force_user *)&ru : NULL);
66473 set_fs(old_fs);
66474
66475 if ((ret < 0) || (info.si_signo == 0))
66476@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t ti
66477 oldfs = get_fs();
66478 set_fs(KERNEL_DS);
66479 err = sys_timer_settime(timer_id, flags,
66480- (struct itimerspec __user *) &newts,
66481- (struct itimerspec __user *) &oldts);
66482+ (struct itimerspec __force_user *) &newts,
66483+ (struct itimerspec __force_user *) &oldts);
66484 set_fs(oldfs);
66485 if (!err && old && put_compat_itimerspec(old, &oldts))
66486 return -EFAULT;
66487@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t ti
66488 oldfs = get_fs();
66489 set_fs(KERNEL_DS);
66490 err = sys_timer_gettime(timer_id,
66491- (struct itimerspec __user *) &ts);
66492+ (struct itimerspec __force_user *) &ts);
66493 set_fs(oldfs);
66494 if (!err && put_compat_itimerspec(setting, &ts))
66495 return -EFAULT;
66496@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t
66497 oldfs = get_fs();
66498 set_fs(KERNEL_DS);
66499 err = sys_clock_settime(which_clock,
66500- (struct timespec __user *) &ts);
66501+ (struct timespec __force_user *) &ts);
66502 set_fs(oldfs);
66503 return err;
66504 }
66505@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t
66506 oldfs = get_fs();
66507 set_fs(KERNEL_DS);
66508 err = sys_clock_gettime(which_clock,
66509- (struct timespec __user *) &ts);
66510+ (struct timespec __force_user *) &ts);
66511 set_fs(oldfs);
66512 if (!err && put_compat_timespec(&ts, tp))
66513 return -EFAULT;
66514@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t w
66515 oldfs = get_fs();
66516 set_fs(KERNEL_DS);
66517 err = sys_clock_getres(which_clock,
66518- (struct timespec __user *) &ts);
66519+ (struct timespec __force_user *) &ts);
66520 set_fs(oldfs);
66521 if (!err && tp && put_compat_timespec(&ts, tp))
66522 return -EFAULT;
66523@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_resta
66524 long err;
66525 mm_segment_t oldfs;
66526 struct timespec tu;
66527- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66528+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66529
66530- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66531+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66532 oldfs = get_fs();
66533 set_fs(KERNEL_DS);
66534 err = clock_nanosleep_restart(restart);
66535@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_
66536 oldfs = get_fs();
66537 set_fs(KERNEL_DS);
66538 err = sys_clock_nanosleep(which_clock, flags,
66539- (struct timespec __user *) &in,
66540- (struct timespec __user *) &out);
66541+ (struct timespec __force_user *) &in,
66542+ (struct timespec __force_user *) &out);
66543 set_fs(oldfs);
66544
66545 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
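
Every kernel/compat.c hunk above follows one pattern: the compat syscall temporarily does set_fs(KERNEL_DS), calls the native syscall with the address of a kernel stack variable, and the cast is changed from __user to __force_user so that sparse, which under this patch's checker treats kernel and user pointers as distinct address spaces, accepts the intentional crossing. The sketch below only illustrates the annotation idea with dummy macros so it builds in userspace; the real __user/__force annotations come from the kernel headers and only take effect under a sparse run.

/* Sketch of the __user vs __force_user annotation pattern used in the
 * compat.c hunks above.  Outside of a sparse run the annotations expand
 * to nothing, which the dummy macros below emulate. */
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force_user  __attribute__((force, address_space(1)))
#else
# define __user
# define __force_user
#endif

struct timespec_model { long tv_sec, tv_nsec; };

/* stands in for the native syscall, which expects a user pointer */
static int native_clock_gettime(struct timespec_model __user *tp)
{
	/* with KERNEL_DS in force the "user" pointer is really a kernel one */
	struct timespec_model *k = (struct timespec_model *)tp;

	k->tv_sec = 1;
	k->tv_nsec = 0;
	return 0;
}

static int compat_clock_gettime(struct timespec_model *out)
{
	struct timespec_model ts;
	int err;

	/* set_fs(KERNEL_DS) would happen here in the kernel */
	err = native_clock_gettime((struct timespec_model __force_user *)&ts);
	/* set_fs(oldfs) would happen here */
	if (!err)
		memcpy(out, &ts, sizeof(ts));
	return err;
}

int main(void)
{
	struct timespec_model ts;

	printf("err=%d sec=%ld\n", compat_clock_gettime(&ts), ts.tv_sec);
	return 0;
}
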
66546diff -urNp linux-2.6.32.48/kernel/configs.c linux-2.6.32.48/kernel/configs.c
66547--- linux-2.6.32.48/kernel/configs.c 2011-11-08 19:02:43.000000000 -0500
66548+++ linux-2.6.32.48/kernel/configs.c 2011-11-15 19:59:43.000000000 -0500
66549@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
66550 struct proc_dir_entry *entry;
66551
66552 /* create the current config file */
66553+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66554+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66555+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66556+ &ikconfig_file_ops);
66557+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66558+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66559+ &ikconfig_file_ops);
66560+#endif
66561+#else
66562 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66563 &ikconfig_file_ops);
66564+#endif
66565+
66566 if (!entry)
66567 return -ENOMEM;
66568
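
The configs.c hunk above restricts /proc/config.gz: world-readable by default, owner-only under GRKERNSEC_PROC_USER or HIDESYM, owner-plus-group under GRKERNSEC_PROC_USERGROUP. The mode selection can be summarized by the small helper below; the Kconfig symbols are represented as plain booleans and this is a userspace sketch, not kernel code.

/* Mode chosen for /proc/config.gz depending on the grsecurity /proc
 * restriction options, modeled with plain booleans. */
#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)	/* kernel macro, spelled out */

static mode_t config_gz_mode(int proc_user_or_hidesym, int proc_usergroup)
{
	if (proc_user_or_hidesym)
		return S_IFREG | S_IRUSR;		/* 0400: root only      */
	if (proc_usergroup)
		return S_IFREG | S_IRUSR | S_IRGRP;	/* 0440: root + group   */
	return S_IFREG | S_IRUGO;			/* 0444: world readable */
}

int main(void)
{
	printf("default:   %04o\n", (unsigned)(config_gz_mode(0, 0) & 07777));
	printf("PROC_USER: %04o\n", (unsigned)(config_gz_mode(1, 0) & 07777));
	printf("USERGROUP: %04o\n", (unsigned)(config_gz_mode(0, 1) & 07777));
	return 0;
}
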
66569diff -urNp linux-2.6.32.48/kernel/cpu.c linux-2.6.32.48/kernel/cpu.c
66570--- linux-2.6.32.48/kernel/cpu.c 2011-11-08 19:02:43.000000000 -0500
66571+++ linux-2.6.32.48/kernel/cpu.c 2011-11-15 19:59:43.000000000 -0500
66572@@ -19,7 +19,7 @@
66573 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
66574 static DEFINE_MUTEX(cpu_add_remove_lock);
66575
66576-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
66577+static RAW_NOTIFIER_HEAD(cpu_chain);
66578
66579 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
66580 * Should always be manipulated under cpu_add_remove_lock
66581diff -urNp linux-2.6.32.48/kernel/cred.c linux-2.6.32.48/kernel/cred.c
66582--- linux-2.6.32.48/kernel/cred.c 2011-11-08 19:02:43.000000000 -0500
66583+++ linux-2.6.32.48/kernel/cred.c 2011-11-15 19:59:43.000000000 -0500
66584@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
66585 */
66586 void __put_cred(struct cred *cred)
66587 {
66588+ pax_track_stack();
66589+
66590 kdebug("__put_cred(%p{%d,%d})", cred,
66591 atomic_read(&cred->usage),
66592 read_cred_subscribers(cred));
66593@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
66594 {
66595 struct cred *cred;
66596
66597+ pax_track_stack();
66598+
66599 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
66600 atomic_read(&tsk->cred->usage),
66601 read_cred_subscribers(tsk->cred));
66602@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
66603 {
66604 const struct cred *cred;
66605
66606+ pax_track_stack();
66607+
66608 rcu_read_lock();
66609
66610 do {
66611@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
66612 {
66613 struct cred *new;
66614
66615+ pax_track_stack();
66616+
66617 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
66618 if (!new)
66619 return NULL;
66620@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
66621 const struct cred *old;
66622 struct cred *new;
66623
66624+ pax_track_stack();
66625+
66626 validate_process_creds();
66627
66628 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
66629@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
66630 struct thread_group_cred *tgcred = NULL;
66631 struct cred *new;
66632
66633+ pax_track_stack();
66634+
66635 #ifdef CONFIG_KEYS
66636 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
66637 if (!tgcred)
66638@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
66639 struct cred *new;
66640 int ret;
66641
66642+ pax_track_stack();
66643+
66644 mutex_init(&p->cred_guard_mutex);
66645
66646 if (
66647@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
66648 struct task_struct *task = current;
66649 const struct cred *old = task->real_cred;
66650
66651+ pax_track_stack();
66652+
66653 kdebug("commit_creds(%p{%d,%d})", new,
66654 atomic_read(&new->usage),
66655 read_cred_subscribers(new));
66656@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
66657
66658 get_cred(new); /* we will require a ref for the subj creds too */
66659
66660+ gr_set_role_label(task, new->uid, new->gid);
66661+
66662 /* dumpability changes */
66663 if (old->euid != new->euid ||
66664 old->egid != new->egid ||
66665@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
66666 key_fsgid_changed(task);
66667
66668 /* do it
66669- * - What if a process setreuid()'s and this brings the
66670- * new uid over his NPROC rlimit? We can check this now
66671- * cheaply with the new uid cache, so if it matters
66672- * we should be checking for it. -DaveM
66673+ * RLIMIT_NPROC limits on user->processes have already been checked
66674+ * in set_user().
66675 */
66676 alter_cred_subscribers(new, 2);
66677 if (new->user != old->user)
66678@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
66679 */
66680 void abort_creds(struct cred *new)
66681 {
66682+ pax_track_stack();
66683+
66684 kdebug("abort_creds(%p{%d,%d})", new,
66685 atomic_read(&new->usage),
66686 read_cred_subscribers(new));
66687@@ -629,6 +647,8 @@ const struct cred *override_creds(const
66688 {
66689 const struct cred *old = current->cred;
66690
66691+ pax_track_stack();
66692+
66693 kdebug("override_creds(%p{%d,%d})", new,
66694 atomic_read(&new->usage),
66695 read_cred_subscribers(new));
66696@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
66697 {
66698 const struct cred *override = current->cred;
66699
66700+ pax_track_stack();
66701+
66702 kdebug("revert_creds(%p{%d,%d})", old,
66703 atomic_read(&old->usage),
66704 read_cred_subscribers(old));
66705@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
66706 const struct cred *old;
66707 struct cred *new;
66708
66709+ pax_track_stack();
66710+
66711 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
66712 if (!new)
66713 return NULL;
66714@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
66715 */
66716 int set_security_override(struct cred *new, u32 secid)
66717 {
66718+ pax_track_stack();
66719+
66720 return security_kernel_act_as(new, secid);
66721 }
66722 EXPORT_SYMBOL(set_security_override);
66723@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
66724 u32 secid;
66725 int ret;
66726
66727+ pax_track_stack();
66728+
66729 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
66730 if (ret < 0)
66731 return ret;
66732diff -urNp linux-2.6.32.48/kernel/exit.c linux-2.6.32.48/kernel/exit.c
66733--- linux-2.6.32.48/kernel/exit.c 2011-11-08 19:02:43.000000000 -0500
66734+++ linux-2.6.32.48/kernel/exit.c 2011-11-15 19:59:43.000000000 -0500
66735@@ -55,6 +55,10 @@
66736 #include <asm/pgtable.h>
66737 #include <asm/mmu_context.h>
66738
66739+#ifdef CONFIG_GRKERNSEC
66740+extern rwlock_t grsec_exec_file_lock;
66741+#endif
66742+
66743 static void exit_mm(struct task_struct * tsk);
66744
66745 static void __unhash_process(struct task_struct *p)
66746@@ -174,6 +178,10 @@ void release_task(struct task_struct * p
66747 struct task_struct *leader;
66748 int zap_leader;
66749 repeat:
66750+#ifdef CONFIG_NET
66751+ gr_del_task_from_ip_table(p);
66752+#endif
66753+
66754 tracehook_prepare_release_task(p);
66755 /* don't need to get the RCU readlock here - the process is dead and
66756 * can't be modifying its own credentials */
66757@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
66758 {
66759 write_lock_irq(&tasklist_lock);
66760
66761+#ifdef CONFIG_GRKERNSEC
66762+ write_lock(&grsec_exec_file_lock);
66763+ if (current->exec_file) {
66764+ fput(current->exec_file);
66765+ current->exec_file = NULL;
66766+ }
66767+ write_unlock(&grsec_exec_file_lock);
66768+#endif
66769+
66770 ptrace_unlink(current);
66771 /* Reparent to init */
66772 current->real_parent = current->parent = kthreadd_task;
66773 list_move_tail(&current->sibling, &current->real_parent->children);
66774
66775+ gr_set_kernel_label(current);
66776+
66777 /* Set the exit signal to SIGCHLD so we signal init on exit */
66778 current->exit_signal = SIGCHLD;
66779
66780@@ -397,7 +416,7 @@ int allow_signal(int sig)
66781 * know it'll be handled, so that they don't get converted to
66782 * SIGKILL or just silently dropped.
66783 */
66784- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66785+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66786 recalc_sigpending();
66787 spin_unlock_irq(&current->sighand->siglock);
66788 return 0;
66789@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
66790 vsnprintf(current->comm, sizeof(current->comm), name, args);
66791 va_end(args);
66792
66793+#ifdef CONFIG_GRKERNSEC
66794+ write_lock(&grsec_exec_file_lock);
66795+ if (current->exec_file) {
66796+ fput(current->exec_file);
66797+ current->exec_file = NULL;
66798+ }
66799+ write_unlock(&grsec_exec_file_lock);
66800+#endif
66801+
66802+ gr_set_kernel_label(current);
66803+
66804 /*
66805 * If we were started as result of loading a module, close all of the
66806 * user space pages. We don't need them, and if we didn't close them
66807@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
66808 struct task_struct *tsk = current;
66809 int group_dead;
66810
66811- profile_task_exit(tsk);
66812-
66813- WARN_ON(atomic_read(&tsk->fs_excl));
66814-
66815+ /*
66816+ * Check this first since set_fs() below depends on
66817+ * current_thread_info(), which we better not access when we're in
66818+ * interrupt context. Other than that, we want to do the set_fs()
66819+ * as early as possible.
66820+ */
66821 if (unlikely(in_interrupt()))
66822 panic("Aiee, killing interrupt handler!");
66823- if (unlikely(!tsk->pid))
66824- panic("Attempted to kill the idle task!");
66825
66826 /*
66827- * If do_exit is called because this processes oopsed, it's possible
66828+ * If do_exit is called because this processes Oops'ed, it's possible
66829 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
66830 * continuing. Amongst other possible reasons, this is to prevent
66831 * mm_release()->clear_child_tid() from writing to a user-controlled
66832@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
66833 */
66834 set_fs(USER_DS);
66835
66836+ profile_task_exit(tsk);
66837+
66838+ WARN_ON(atomic_read(&tsk->fs_excl));
66839+
66840+ if (unlikely(!tsk->pid))
66841+ panic("Attempted to kill the idle task!");
66842+
66843 tracehook_report_exit(&code);
66844
66845 validate_creds_for_do_exit(tsk);
66846@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
66847 tsk->exit_code = code;
66848 taskstats_exit(tsk, group_dead);
66849
66850+ gr_acl_handle_psacct(tsk, code);
66851+ gr_acl_handle_exit();
66852+
66853 exit_mm(tsk);
66854
66855 if (group_dead)
66856@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
66857
66858 if (unlikely(wo->wo_flags & WNOWAIT)) {
66859 int exit_code = p->exit_code;
66860- int why, status;
66861+ int why;
66862
66863 get_task_struct(p);
66864 read_unlock(&tasklist_lock);
66865diff -urNp linux-2.6.32.48/kernel/fork.c linux-2.6.32.48/kernel/fork.c
66866--- linux-2.6.32.48/kernel/fork.c 2011-11-08 19:02:43.000000000 -0500
66867+++ linux-2.6.32.48/kernel/fork.c 2011-11-15 19:59:43.000000000 -0500
66868@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
66869 *stackend = STACK_END_MAGIC; /* for overflow detection */
66870
66871 #ifdef CONFIG_CC_STACKPROTECTOR
66872- tsk->stack_canary = get_random_int();
66873+ tsk->stack_canary = pax_get_random_long();
66874 #endif
66875
66876 /* One for us, one for whoever does the "release_task()" (usually parent) */
66877@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
66878 mm->locked_vm = 0;
66879 mm->mmap = NULL;
66880 mm->mmap_cache = NULL;
66881- mm->free_area_cache = oldmm->mmap_base;
66882- mm->cached_hole_size = ~0UL;
66883+ mm->free_area_cache = oldmm->free_area_cache;
66884+ mm->cached_hole_size = oldmm->cached_hole_size;
66885 mm->map_count = 0;
66886 cpumask_clear(mm_cpumask(mm));
66887 mm->mm_rb = RB_ROOT;
66888@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
66889 tmp->vm_flags &= ~VM_LOCKED;
66890 tmp->vm_mm = mm;
66891 tmp->vm_next = tmp->vm_prev = NULL;
66892+ tmp->vm_mirror = NULL;
66893 anon_vma_link(tmp);
66894 file = tmp->vm_file;
66895 if (file) {
66896@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
66897 if (retval)
66898 goto out;
66899 }
66900+
66901+#ifdef CONFIG_PAX_SEGMEXEC
66902+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66903+ struct vm_area_struct *mpnt_m;
66904+
66905+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66906+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66907+
66908+ if (!mpnt->vm_mirror)
66909+ continue;
66910+
66911+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66912+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66913+ mpnt->vm_mirror = mpnt_m;
66914+ } else {
66915+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66916+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66917+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66918+ mpnt->vm_mirror->vm_mirror = mpnt;
66919+ }
66920+ }
66921+ BUG_ON(mpnt_m);
66922+ }
66923+#endif
66924+
66925 /* a new mm has just been created */
66926 arch_dup_mmap(oldmm, mm);
66927 retval = 0;
66928@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
66929 write_unlock(&fs->lock);
66930 return -EAGAIN;
66931 }
66932- fs->users++;
66933+ atomic_inc(&fs->users);
66934 write_unlock(&fs->lock);
66935 return 0;
66936 }
66937 tsk->fs = copy_fs_struct(fs);
66938 if (!tsk->fs)
66939 return -ENOMEM;
66940+ gr_set_chroot_entries(tsk, &tsk->fs->root);
66941 return 0;
66942 }
66943
66944@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
66945 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66946 #endif
66947 retval = -EAGAIN;
66948+
66949+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66950+
66951 if (atomic_read(&p->real_cred->user->processes) >=
66952 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66953- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66954- p->real_cred->user != INIT_USER)
66955+ if (p->real_cred->user != INIT_USER &&
66956+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
66957 goto bad_fork_free;
66958 }
66959+ current->flags &= ~PF_NPROC_EXCEEDED;
66960
66961 retval = copy_creds(p, clone_flags);
66962 if (retval < 0)
66963@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
66964 goto bad_fork_free_pid;
66965 }
66966
66967+ gr_copy_label(p);
66968+
66969 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66970 /*
66971 * Clear TID on mm_release()?
66972@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
66973 bad_fork_free:
66974 free_task(p);
66975 fork_out:
66976+ gr_log_forkfail(retval);
66977+
66978 return ERR_PTR(retval);
66979 }
66980
66981@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
66982 if (clone_flags & CLONE_PARENT_SETTID)
66983 put_user(nr, parent_tidptr);
66984
66985+ gr_handle_brute_check();
66986+
66987 if (clone_flags & CLONE_VFORK) {
66988 p->vfork_done = &vfork;
66989 init_completion(&vfork);
66990@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
66991 return 0;
66992
66993 /* don't need lock here; in the worst case we'll do useless copy */
66994- if (fs->users == 1)
66995+ if (atomic_read(&fs->users) == 1)
66996 return 0;
66997
66998 *new_fsp = copy_fs_struct(fs);
66999@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
67000 fs = current->fs;
67001 write_lock(&fs->lock);
67002 current->fs = new_fs;
67003- if (--fs->users)
67004+ gr_set_chroot_entries(current, &current->fs->root);
67005+ if (atomic_dec_return(&fs->users))
67006 new_fs = NULL;
67007 else
67008 new_fs = fs;
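
The copy_process() hunk above reorders the RLIMIT_NPROC test so that the cheap INIT_USER comparison short-circuits before the two capable() calls, presumably because capable() sets PF_SUPERPRIV (and under grsecurity can log capability use) as a side effect; it also reports the observed process count via gr_learn_resource() and clears PF_NPROC_EXCEEDED. The toy sketch below shows only the reordered short-circuit; all kernel state (capable(), INIT_USER, rlimits) is faked.

/* Toy model of the reordered RLIMIT_NPROC check in copy_process().
 * Only the evaluation order of the condition is the point. */
#include <stdio.h>

struct user_model { int is_init_user; long processes; };

static int superpriv_was_set;

static int fake_capable(const char *cap)
{
	superpriv_was_set = 1;		/* capable() sets PF_SUPERPRIV */
	printf("capable(%s) consulted\n", cap);
	return 1;			/* pretend the caller has it */
}

static int nproc_check(const struct user_model *u, long rlimit_nproc)
{
	if (u->processes >= rlimit_nproc) {
		/* patched order: INIT_USER short-circuits before capable() */
		if (!u->is_init_user &&
		    !fake_capable("CAP_SYS_RESOURCE") &&
		    !fake_capable("CAP_SYS_ADMIN"))
			return -11;	/* -EAGAIN */
	}
	return 0;
}

int main(void)
{
	struct user_model init_user = { .is_init_user = 1, .processes = 10000 };
	int err = nproc_check(&init_user, 100);

	printf("err=%d, PF_SUPERPRIV set: %s\n", err,
	       superpriv_was_set ? "yes" : "no");	/* stays "no" for INIT_USER */
	return 0;
}
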
67009diff -urNp linux-2.6.32.48/kernel/futex.c linux-2.6.32.48/kernel/futex.c
67010--- linux-2.6.32.48/kernel/futex.c 2011-11-08 19:02:43.000000000 -0500
67011+++ linux-2.6.32.48/kernel/futex.c 2011-11-15 19:59:43.000000000 -0500
67012@@ -54,6 +54,7 @@
67013 #include <linux/mount.h>
67014 #include <linux/pagemap.h>
67015 #include <linux/syscalls.h>
67016+#include <linux/ptrace.h>
67017 #include <linux/signal.h>
67018 #include <linux/module.h>
67019 #include <linux/magic.h>
67020@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fsh
67021 struct page *page;
67022 int err, ro = 0;
67023
67024+#ifdef CONFIG_PAX_SEGMEXEC
67025+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67026+ return -EFAULT;
67027+#endif
67028+
67029 /*
67030 * The futex address must be "naturally" aligned.
67031 */
67032@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr,
67033 struct futex_q q;
67034 int ret;
67035
67036+ pax_track_stack();
67037+
67038 if (!bitset)
67039 return -EINVAL;
67040
67041@@ -1871,7 +1879,7 @@ retry:
67042
67043 restart = &current_thread_info()->restart_block;
67044 restart->fn = futex_wait_restart;
67045- restart->futex.uaddr = (u32 *)uaddr;
67046+ restart->futex.uaddr = uaddr;
67047 restart->futex.val = val;
67048 restart->futex.time = abs_time->tv64;
67049 restart->futex.bitset = bitset;
67050@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __u
67051 struct futex_q q;
67052 int res, ret;
67053
67054+ pax_track_stack();
67055+
67056 if (!bitset)
67057 return -EINVAL;
67058
67059@@ -2407,7 +2417,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
67060 {
67061 struct robust_list_head __user *head;
67062 unsigned long ret;
67063+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
67064 const struct cred *cred = current_cred(), *pcred;
67065+#endif
67066
67067 if (!futex_cmpxchg_enabled)
67068 return -ENOSYS;
67069@@ -2423,11 +2435,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
67070 if (!p)
67071 goto err_unlock;
67072 ret = -EPERM;
67073+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67074+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
67075+ goto err_unlock;
67076+#else
67077 pcred = __task_cred(p);
67078 if (cred->euid != pcred->euid &&
67079 cred->euid != pcred->uid &&
67080 !capable(CAP_SYS_PTRACE))
67081 goto err_unlock;
67082+#endif
67083 head = p->robust_list;
67084 rcu_read_unlock();
67085 }
67086@@ -2489,7 +2506,7 @@ retry:
67087 */
67088 static inline int fetch_robust_entry(struct robust_list __user **entry,
67089 struct robust_list __user * __user *head,
67090- int *pi)
67091+ unsigned int *pi)
67092 {
67093 unsigned long uentry;
67094
67095@@ -2670,6 +2687,7 @@ static int __init futex_init(void)
67096 {
67097 u32 curval;
67098 int i;
67099+ mm_segment_t oldfs;
67100
67101 /*
67102 * This will fail and we want it. Some arch implementations do
67103@@ -2681,7 +2699,10 @@ static int __init futex_init(void)
67104 * implementation, the non functional ones will return
67105 * -ENOSYS.
67106 */
67107+ oldfs = get_fs();
67108+ set_fs(USER_DS);
67109 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
67110+ set_fs(oldfs);
67111 if (curval == -EFAULT)
67112 futex_cmpxchg_enabled = 1;
67113
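
The sys_get_robust_list() hunk above swaps the stock euid comparison for a ptrace_may_access(p, PTRACE_MODE_READ) check when GRKERNSEC_PROC_MEMMAP is enabled, so reading another task's robust futex list head is gated by the same policy as other memory-layout probes. The sketch below contrasts the two branches in userspace; all credential state is faked and fake_ptrace_may_access() is only a crude stand-in for the real ptrace policy.

/* Userspace model of the two permission policies seen in
 * sys_get_robust_list() above: stock euid comparison versus the
 * GRKERNSEC_PROC_MEMMAP variant that defers to ptrace_may_access(). */
#include <stdio.h>

struct cred_model { unsigned int uid, euid; };

static int fake_ptrace_may_access(const struct cred_model *target,
				  const struct cred_model *caller)
{
	/* stand-in: the real ptrace_may_access() applies the full policy */
	return caller->euid == 0 || caller->euid == target->uid;
}

static int may_read_robust_list(const struct cred_model *caller,
				const struct cred_model *target,
				int proc_memmap, int cap_sys_ptrace)
{
	if (proc_memmap)				/* patched branch */
		return fake_ptrace_may_access(target, caller);

	return caller->euid == target->euid ||		/* stock branch   */
	       caller->euid == target->uid  ||
	       cap_sys_ptrace;
}

int main(void)
{
	struct cred_model caller = { .uid = 1000, .euid = 1000 };
	struct cred_model target = { .uid = 1001, .euid = 1001 };

	printf("stock: %d, PROC_MEMMAP: %d\n",
	       may_read_robust_list(&caller, &target, 0, 0),
	       may_read_robust_list(&caller, &target, 1, 0));
	return 0;
}
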
67114diff -urNp linux-2.6.32.48/kernel/futex_compat.c linux-2.6.32.48/kernel/futex_compat.c
67115--- linux-2.6.32.48/kernel/futex_compat.c 2011-11-08 19:02:43.000000000 -0500
67116+++ linux-2.6.32.48/kernel/futex_compat.c 2011-11-15 19:59:43.000000000 -0500
67117@@ -10,6 +10,7 @@
67118 #include <linux/compat.h>
67119 #include <linux/nsproxy.h>
67120 #include <linux/futex.h>
67121+#include <linux/ptrace.h>
67122
67123 #include <asm/uaccess.h>
67124
67125@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
67126 {
67127 struct compat_robust_list_head __user *head;
67128 unsigned long ret;
67129- const struct cred *cred = current_cred(), *pcred;
67130+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
67131+ const struct cred *cred = current_cred();
67132+ const struct cred *pcred;
67133+#endif
67134
67135 if (!futex_cmpxchg_enabled)
67136 return -ENOSYS;
67137@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
67138 if (!p)
67139 goto err_unlock;
67140 ret = -EPERM;
67141+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67142+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
67143+ goto err_unlock;
67144+#else
67145 pcred = __task_cred(p);
67146 if (cred->euid != pcred->euid &&
67147 cred->euid != pcred->uid &&
67148 !capable(CAP_SYS_PTRACE))
67149 goto err_unlock;
67150+#endif
67151 head = p->compat_robust_list;
67152 read_unlock(&tasklist_lock);
67153 }
67154diff -urNp linux-2.6.32.48/kernel/gcov/base.c linux-2.6.32.48/kernel/gcov/base.c
67155--- linux-2.6.32.48/kernel/gcov/base.c 2011-11-08 19:02:43.000000000 -0500
67156+++ linux-2.6.32.48/kernel/gcov/base.c 2011-11-15 19:59:43.000000000 -0500
67157@@ -102,11 +102,6 @@ void gcov_enable_events(void)
67158 }
67159
67160 #ifdef CONFIG_MODULES
67161-static inline int within(void *addr, void *start, unsigned long size)
67162-{
67163- return ((addr >= start) && (addr < start + size));
67164-}
67165-
67166 /* Update list and generate events when modules are unloaded. */
67167 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67168 void *data)
67169@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
67170 prev = NULL;
67171 /* Remove entries located in module from linked list. */
67172 for (info = gcov_info_head; info; info = info->next) {
67173- if (within(info, mod->module_core, mod->core_size)) {
67174+ if (within_module_core_rw((unsigned long)info, mod)) {
67175 if (prev)
67176 prev->next = info->next;
67177 else
67178diff -urNp linux-2.6.32.48/kernel/hrtimer.c linux-2.6.32.48/kernel/hrtimer.c
67179--- linux-2.6.32.48/kernel/hrtimer.c 2011-11-08 19:02:43.000000000 -0500
67180+++ linux-2.6.32.48/kernel/hrtimer.c 2011-11-15 19:59:43.000000000 -0500
67181@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
67182 local_irq_restore(flags);
67183 }
67184
67185-static void run_hrtimer_softirq(struct softirq_action *h)
67186+static void run_hrtimer_softirq(void)
67187 {
67188 hrtimer_peek_ahead_timers();
67189 }
67190diff -urNp linux-2.6.32.48/kernel/kallsyms.c linux-2.6.32.48/kernel/kallsyms.c
67191--- linux-2.6.32.48/kernel/kallsyms.c 2011-11-08 19:02:43.000000000 -0500
67192+++ linux-2.6.32.48/kernel/kallsyms.c 2011-11-15 19:59:43.000000000 -0500
67193@@ -11,6 +11,9 @@
67194 * Changed the compression method from stem compression to "table lookup"
67195 * compression (see scripts/kallsyms.c for a more complete description)
67196 */
67197+#ifdef CONFIG_GRKERNSEC_HIDESYM
67198+#define __INCLUDED_BY_HIDESYM 1
67199+#endif
67200 #include <linux/kallsyms.h>
67201 #include <linux/module.h>
67202 #include <linux/init.h>
67203@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
67204
67205 static inline int is_kernel_inittext(unsigned long addr)
67206 {
67207+ if (system_state != SYSTEM_BOOTING)
67208+ return 0;
67209+
67210 if (addr >= (unsigned long)_sinittext
67211 && addr <= (unsigned long)_einittext)
67212 return 1;
67213 return 0;
67214 }
67215
67216+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67217+#ifdef CONFIG_MODULES
67218+static inline int is_module_text(unsigned long addr)
67219+{
67220+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67221+ return 1;
67222+
67223+ addr = ktla_ktva(addr);
67224+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67225+}
67226+#else
67227+static inline int is_module_text(unsigned long addr)
67228+{
67229+ return 0;
67230+}
67231+#endif
67232+#endif
67233+
67234 static inline int is_kernel_text(unsigned long addr)
67235 {
67236 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67237@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
67238
67239 static inline int is_kernel(unsigned long addr)
67240 {
67241+
67242+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67243+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
67244+ return 1;
67245+
67246+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67247+#else
67248 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67249+#endif
67250+
67251 return 1;
67252 return in_gate_area_no_task(addr);
67253 }
67254
67255 static int is_ksym_addr(unsigned long addr)
67256 {
67257+
67258+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67259+ if (is_module_text(addr))
67260+ return 0;
67261+#endif
67262+
67263 if (all_var)
67264 return is_kernel(addr);
67265
67266@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
67267
67268 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67269 {
67270- iter->name[0] = '\0';
67271 iter->nameoff = get_symbol_offset(new_pos);
67272 iter->pos = new_pos;
67273 }
67274@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
67275 {
67276 struct kallsym_iter *iter = m->private;
67277
67278+#ifdef CONFIG_GRKERNSEC_HIDESYM
67279+ if (current_uid())
67280+ return 0;
67281+#endif
67282+
67283 /* Some debugging symbols have no name. Ignore them. */
67284 if (!iter->name[0])
67285 return 0;
67286@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
67287 struct kallsym_iter *iter;
67288 int ret;
67289
67290- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67291+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67292 if (!iter)
67293 return -ENOMEM;
67294 reset_iter(iter, 0);
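
The kallsyms.c hunk above is the HIDESYM view restriction: s_show() returns 0 for any reader with a non-zero current_uid(), so unprivileged reads of /proc/kallsyms produce an empty listing instead of kernel addresses, and the iterator is now kzalloc()'d so its name buffer starts zeroed rather than relying on the per-reset clearing that the patch removes. A trivial model of that filter is below; uid handling and the seq_file machinery are faked.

/* Toy model of the GRKERNSEC_HIDESYM filtering added to s_show():
 * non-root readers see no symbol lines at all. */
#include <stdio.h>

struct ksym { unsigned long addr; const char *name; };

static void dump_kallsyms(const struct ksym *tab, int n, unsigned int uid)
{
	for (int i = 0; i < n; i++) {
		if (uid != 0)		/* the patch's "if (current_uid()) return 0;" */
			continue;	/* emit nothing for unprivileged readers */
		if (!tab[i].name[0])	/* unnamed debugging symbols are skipped */
			continue;
		printf("%08lx %s\n", tab[i].addr, tab[i].name);
	}
}

int main(void)
{
	static const struct ksym tab[] = {
		{ 0xc1000000UL, "_text" },
		{ 0xc1234567UL, "do_fork" },
	};

	printf("-- as root --\n");
	dump_kallsyms(tab, 2, 0);
	printf("-- as uid 1000 --\n");
	dump_kallsyms(tab, 2, 1000);
	return 0;
}
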
67295diff -urNp linux-2.6.32.48/kernel/kexec.c linux-2.6.32.48/kernel/kexec.c
67296--- linux-2.6.32.48/kernel/kexec.c 2011-11-08 19:02:43.000000000 -0500
67297+++ linux-2.6.32.48/kernel/kexec.c 2011-11-15 19:59:43.000000000 -0500
67298@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(un
67299 unsigned long flags)
67300 {
67301 struct compat_kexec_segment in;
67302- struct kexec_segment out, __user *ksegments;
67303+ struct kexec_segment out;
67304+ struct kexec_segment __user *ksegments;
67305 unsigned long i, result;
67306
67307 /* Don't allow clients that don't understand the native
67308diff -urNp linux-2.6.32.48/kernel/kgdb.c linux-2.6.32.48/kernel/kgdb.c
67309--- linux-2.6.32.48/kernel/kgdb.c 2011-11-08 19:02:43.000000000 -0500
67310+++ linux-2.6.32.48/kernel/kgdb.c 2011-11-15 19:59:43.000000000 -0500
67311@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
67312 /* Guard for recursive entry */
67313 static int exception_level;
67314
67315-static struct kgdb_io *kgdb_io_ops;
67316+static const struct kgdb_io *kgdb_io_ops;
67317 static DEFINE_SPINLOCK(kgdb_registration_lock);
67318
67319 /* kgdb console driver is loaded */
67320@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
67321 */
67322 static atomic_t passive_cpu_wait[NR_CPUS];
67323 static atomic_t cpu_in_kgdb[NR_CPUS];
67324-atomic_t kgdb_setting_breakpoint;
67325+atomic_unchecked_t kgdb_setting_breakpoint;
67326
67327 struct task_struct *kgdb_usethread;
67328 struct task_struct *kgdb_contthread;
67329@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
67330 sizeof(unsigned long)];
67331
67332 /* to keep track of the CPU which is doing the single stepping*/
67333-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
67334+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
67335
67336 /*
67337 * If you are debugging a problem where roundup (the collection of
67338@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
67339 return 0;
67340 if (kgdb_connected)
67341 return 1;
67342- if (atomic_read(&kgdb_setting_breakpoint))
67343+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
67344 return 1;
67345 if (print_wait)
67346 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
67347@@ -1426,8 +1426,8 @@ acquirelock:
67348 * instance of the exception handler wanted to come into the
67349 * debugger on a different CPU via a single step
67350 */
67351- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
67352- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
67353+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
67354+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
67355
67356 atomic_set(&kgdb_active, -1);
67357 touch_softlockup_watchdog();
67358@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
67359 *
67360 * Register it with the KGDB core.
67361 */
67362-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
67363+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
67364 {
67365 int err;
67366
67367@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
67368 *
67369 * Unregister it with the KGDB core.
67370 */
67371-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
67372+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
67373 {
67374 BUG_ON(kgdb_connected);
67375
67376@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
67377 */
67378 void kgdb_breakpoint(void)
67379 {
67380- atomic_set(&kgdb_setting_breakpoint, 1);
67381+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
67382 wmb(); /* Sync point before breakpoint */
67383 arch_kgdb_breakpoint();
67384 wmb(); /* Sync point after breakpoint */
67385- atomic_set(&kgdb_setting_breakpoint, 0);
67386+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
67387 }
67388 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
67389
67390diff -urNp linux-2.6.32.48/kernel/kmod.c linux-2.6.32.48/kernel/kmod.c
67391--- linux-2.6.32.48/kernel/kmod.c 2011-11-08 19:02:43.000000000 -0500
67392+++ linux-2.6.32.48/kernel/kmod.c 2011-11-15 19:59:43.000000000 -0500
67393@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
67394 * If module auto-loading support is disabled then this function
67395 * becomes a no-operation.
67396 */
67397-int __request_module(bool wait, const char *fmt, ...)
67398+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67399 {
67400- va_list args;
67401 char module_name[MODULE_NAME_LEN];
67402 unsigned int max_modprobes;
67403 int ret;
67404- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
67405+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
67406 static char *envp[] = { "HOME=/",
67407 "TERM=linux",
67408 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
67409@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
67410 if (ret)
67411 return ret;
67412
67413- va_start(args, fmt);
67414- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67415- va_end(args);
67416+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67417 if (ret >= MODULE_NAME_LEN)
67418 return -ENAMETOOLONG;
67419
67420+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67421+ if (!current_uid()) {
67422+ /* hack to workaround consolekit/udisks stupidity */
67423+ read_lock(&tasklist_lock);
67424+ if (!strcmp(current->comm, "mount") &&
67425+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67426+ read_unlock(&tasklist_lock);
67427+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67428+ return -EPERM;
67429+ }
67430+ read_unlock(&tasklist_lock);
67431+ }
67432+#endif
67433+
67434 /* If modprobe needs a service that is in a module, we get a recursive
67435 * loop. Limit the number of running kmod threads to max_threads/2 or
67436 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67437@@ -123,6 +134,48 @@ int __request_module(bool wait, const ch
67438 atomic_dec(&kmod_concurrent);
67439 return ret;
67440 }
67441+
67442+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67443+{
67444+ va_list args;
67445+ int ret;
67446+
67447+ va_start(args, fmt);
67448+ ret = ____request_module(wait, module_param, fmt, args);
67449+ va_end(args);
67450+
67451+ return ret;
67452+}
67453+
67454+int __request_module(bool wait, const char *fmt, ...)
67455+{
67456+ va_list args;
67457+ int ret;
67458+
67459+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67460+ if (current_uid()) {
67461+ char module_param[MODULE_NAME_LEN];
67462+
67463+ memset(module_param, 0, sizeof(module_param));
67464+
67465+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67466+
67467+ va_start(args, fmt);
67468+ ret = ____request_module(wait, module_param, fmt, args);
67469+ va_end(args);
67470+
67471+ return ret;
67472+ }
67473+#endif
67474+
67475+ va_start(args, fmt);
67476+ ret = ____request_module(wait, NULL, fmt, args);
67477+ va_end(args);
67478+
67479+ return ret;
67480+}
67481+
67482+
67483 EXPORT_SYMBOL(__request_module);
67484 #endif /* CONFIG_MODULES */
67485
67486@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
67487 *
67488 * Thus the __user pointer cast is valid here.
67489 */
67490- sys_wait4(pid, (int __user *)&ret, 0, NULL);
67491+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67492
67493 /*
67494 * If ret is 0, either ____call_usermodehelper failed and the
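
The kmod.c changes above split __request_module() into a va_list core, ____request_module(), that can append one extra argument to modprobe's command line; under GRKERNSEC_MODHARDEN a non-root requester gets a "grsec_modharden_normal<uid>_" marker passed along so module-loading policy can distinguish user-triggered auto-loads, and the mount/udisks special case refuses fs-module auto-loading outright. The fragment below only sketches how that marker string and argv slot are assembled; MODULE_NAME_LEN is a stand-in constant, no process is spawned, and nothing here is the kernel's usermode-helper machinery.

/* Userspace sketch of the MODHARDEN marker and modprobe argv built by
 * the patched ____request_module().  Purely illustrative. */
#include <stdio.h>
#include <string.h>

#define MODULE_NAME_LEN 56	/* stand-in for the kernel constant */

static void build_modprobe_argv(const char *module_name, unsigned int uid,
				const char *argv_out[6])
{
	static char module_param[MODULE_NAME_LEN];

	memset(module_param, 0, sizeof(module_param));
	if (uid != 0)	/* non-root auto-load gets tagged, as in the patch */
		snprintf(module_param, sizeof(module_param) - 1,
			 "grsec_modharden_normal%u_", uid);

	argv_out[0] = "/sbin/modprobe";
	argv_out[1] = "-q";
	argv_out[2] = "--";
	argv_out[3] = module_name;
	argv_out[4] = uid ? module_param : NULL;
	argv_out[5] = NULL;
}

int main(void)
{
	const char *argv[6];

	build_modprobe_argv("nf_conntrack_ftp", 1000, argv);
	for (int i = 0; argv[i]; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}
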
67495diff -urNp linux-2.6.32.48/kernel/kprobes.c linux-2.6.32.48/kernel/kprobes.c
67496--- linux-2.6.32.48/kernel/kprobes.c 2011-11-08 19:02:43.000000000 -0500
67497+++ linux-2.6.32.48/kernel/kprobes.c 2011-11-15 19:59:43.000000000 -0500
67498@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
67499 * kernel image and loaded module images reside. This is required
67500 * so x86_64 can correctly handle the %rip-relative fixups.
67501 */
67502- kip->insns = module_alloc(PAGE_SIZE);
67503+ kip->insns = module_alloc_exec(PAGE_SIZE);
67504 if (!kip->insns) {
67505 kfree(kip);
67506 return NULL;
67507@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
67508 */
67509 if (!list_is_singular(&kprobe_insn_pages)) {
67510 list_del(&kip->list);
67511- module_free(NULL, kip->insns);
67512+ module_free_exec(NULL, kip->insns);
67513 kfree(kip);
67514 }
67515 return 1;
67516@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
67517 {
67518 int i, err = 0;
67519 unsigned long offset = 0, size = 0;
67520- char *modname, namebuf[128];
67521+ char *modname, namebuf[KSYM_NAME_LEN];
67522 const char *symbol_name;
67523 void *addr;
67524 struct kprobe_blackpoint *kb;
67525@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
67526 const char *sym = NULL;
67527 unsigned int i = *(loff_t *) v;
67528 unsigned long offset = 0;
67529- char *modname, namebuf[128];
67530+ char *modname, namebuf[KSYM_NAME_LEN];
67531
67532 head = &kprobe_table[i];
67533 preempt_disable();
67534diff -urNp linux-2.6.32.48/kernel/lockdep.c linux-2.6.32.48/kernel/lockdep.c
67535--- linux-2.6.32.48/kernel/lockdep.c 2011-11-08 19:02:43.000000000 -0500
67536+++ linux-2.6.32.48/kernel/lockdep.c 2011-11-15 19:59:43.000000000 -0500
67537@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
67538 /*
67539 * Various lockdep statistics:
67540 */
67541-atomic_t chain_lookup_hits;
67542-atomic_t chain_lookup_misses;
67543-atomic_t hardirqs_on_events;
67544-atomic_t hardirqs_off_events;
67545-atomic_t redundant_hardirqs_on;
67546-atomic_t redundant_hardirqs_off;
67547-atomic_t softirqs_on_events;
67548-atomic_t softirqs_off_events;
67549-atomic_t redundant_softirqs_on;
67550-atomic_t redundant_softirqs_off;
67551-atomic_t nr_unused_locks;
67552-atomic_t nr_cyclic_checks;
67553-atomic_t nr_find_usage_forwards_checks;
67554-atomic_t nr_find_usage_backwards_checks;
67555+atomic_unchecked_t chain_lookup_hits;
67556+atomic_unchecked_t chain_lookup_misses;
67557+atomic_unchecked_t hardirqs_on_events;
67558+atomic_unchecked_t hardirqs_off_events;
67559+atomic_unchecked_t redundant_hardirqs_on;
67560+atomic_unchecked_t redundant_hardirqs_off;
67561+atomic_unchecked_t softirqs_on_events;
67562+atomic_unchecked_t softirqs_off_events;
67563+atomic_unchecked_t redundant_softirqs_on;
67564+atomic_unchecked_t redundant_softirqs_off;
67565+atomic_unchecked_t nr_unused_locks;
67566+atomic_unchecked_t nr_cyclic_checks;
67567+atomic_unchecked_t nr_find_usage_forwards_checks;
67568+atomic_unchecked_t nr_find_usage_backwards_checks;
67569 #endif
67570
67571 /*
67572@@ -577,6 +577,10 @@ static int static_obj(void *obj)
67573 int i;
67574 #endif
67575
67576+#ifdef CONFIG_PAX_KERNEXEC
67577+ start = ktla_ktva(start);
67578+#endif
67579+
67580 /*
67581 * static variable?
67582 */
67583@@ -592,8 +596,7 @@ static int static_obj(void *obj)
67584 */
67585 for_each_possible_cpu(i) {
67586 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
67587- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
67588- + per_cpu_offset(i);
67589+ end = start + PERCPU_ENOUGH_ROOM;
67590
67591 if ((addr >= start) && (addr < end))
67592 return 1;
67593@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
67594 if (!static_obj(lock->key)) {
67595 debug_locks_off();
67596 printk("INFO: trying to register non-static key.\n");
67597+ printk("lock:%pS key:%pS.\n", lock, lock->key);
67598 printk("the code is fine but needs lockdep annotation.\n");
67599 printk("turning off the locking correctness validator.\n");
67600 dump_stack();
67601@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
67602 if (!class)
67603 return 0;
67604 }
67605- debug_atomic_inc((atomic_t *)&class->ops);
67606+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
67607 if (very_verbose(class)) {
67608 printk("\nacquire class [%p] %s", class->key, class->name);
67609 if (class->name_version > 1)
67610diff -urNp linux-2.6.32.48/kernel/lockdep_internals.h linux-2.6.32.48/kernel/lockdep_internals.h
67611--- linux-2.6.32.48/kernel/lockdep_internals.h 2011-11-08 19:02:43.000000000 -0500
67612+++ linux-2.6.32.48/kernel/lockdep_internals.h 2011-11-15 19:59:43.000000000 -0500
67613@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
67614 /*
67615 * Various lockdep statistics:
67616 */
67617-extern atomic_t chain_lookup_hits;
67618-extern atomic_t chain_lookup_misses;
67619-extern atomic_t hardirqs_on_events;
67620-extern atomic_t hardirqs_off_events;
67621-extern atomic_t redundant_hardirqs_on;
67622-extern atomic_t redundant_hardirqs_off;
67623-extern atomic_t softirqs_on_events;
67624-extern atomic_t softirqs_off_events;
67625-extern atomic_t redundant_softirqs_on;
67626-extern atomic_t redundant_softirqs_off;
67627-extern atomic_t nr_unused_locks;
67628-extern atomic_t nr_cyclic_checks;
67629-extern atomic_t nr_cyclic_check_recursions;
67630-extern atomic_t nr_find_usage_forwards_checks;
67631-extern atomic_t nr_find_usage_forwards_recursions;
67632-extern atomic_t nr_find_usage_backwards_checks;
67633-extern atomic_t nr_find_usage_backwards_recursions;
67634-# define debug_atomic_inc(ptr) atomic_inc(ptr)
67635-# define debug_atomic_dec(ptr) atomic_dec(ptr)
67636-# define debug_atomic_read(ptr) atomic_read(ptr)
67637+extern atomic_unchecked_t chain_lookup_hits;
67638+extern atomic_unchecked_t chain_lookup_misses;
67639+extern atomic_unchecked_t hardirqs_on_events;
67640+extern atomic_unchecked_t hardirqs_off_events;
67641+extern atomic_unchecked_t redundant_hardirqs_on;
67642+extern atomic_unchecked_t redundant_hardirqs_off;
67643+extern atomic_unchecked_t softirqs_on_events;
67644+extern atomic_unchecked_t softirqs_off_events;
67645+extern atomic_unchecked_t redundant_softirqs_on;
67646+extern atomic_unchecked_t redundant_softirqs_off;
67647+extern atomic_unchecked_t nr_unused_locks;
67648+extern atomic_unchecked_t nr_cyclic_checks;
67649+extern atomic_unchecked_t nr_cyclic_check_recursions;
67650+extern atomic_unchecked_t nr_find_usage_forwards_checks;
67651+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
67652+extern atomic_unchecked_t nr_find_usage_backwards_checks;
67653+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
67654+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
67655+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
67656+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
67657 #else
67658 # define debug_atomic_inc(ptr) do { } while (0)
67659 # define debug_atomic_dec(ptr) do { } while (0)
67660diff -urNp linux-2.6.32.48/kernel/lockdep_proc.c linux-2.6.32.48/kernel/lockdep_proc.c
67661--- linux-2.6.32.48/kernel/lockdep_proc.c 2011-11-08 19:02:43.000000000 -0500
67662+++ linux-2.6.32.48/kernel/lockdep_proc.c 2011-11-15 19:59:43.000000000 -0500
67663@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
67664
67665 static void print_name(struct seq_file *m, struct lock_class *class)
67666 {
67667- char str[128];
67668+ char str[KSYM_NAME_LEN];
67669 const char *name = class->name;
67670
67671 if (!name) {
67672diff -urNp linux-2.6.32.48/kernel/module.c linux-2.6.32.48/kernel/module.c
67673--- linux-2.6.32.48/kernel/module.c 2011-11-08 19:02:43.000000000 -0500
67674+++ linux-2.6.32.48/kernel/module.c 2011-11-15 19:59:43.000000000 -0500
67675@@ -55,6 +55,7 @@
67676 #include <linux/async.h>
67677 #include <linux/percpu.h>
67678 #include <linux/kmemleak.h>
67679+#include <linux/grsecurity.h>
67680
67681 #define CREATE_TRACE_POINTS
67682 #include <trace/events/module.h>
67683@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
67684 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
67685
67686 /* Bounds of module allocation, for speeding __module_address */
67687-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67688+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67689+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67690
67691 int register_module_notifier(struct notifier_block * nb)
67692 {
67693@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
67694 return true;
67695
67696 list_for_each_entry_rcu(mod, &modules, list) {
67697- struct symsearch arr[] = {
67698+ struct symsearch modarr[] = {
67699 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67700 NOT_GPL_ONLY, false },
67701 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
67702@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
67703 #endif
67704 };
67705
67706- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67707+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67708 return true;
67709 }
67710 return false;
67711@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
67712 void *ptr;
67713 int cpu;
67714
67715- if (align > PAGE_SIZE) {
67716+ if (align-1 >= PAGE_SIZE) {
67717 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
67718 name, align, PAGE_SIZE);
67719 align = PAGE_SIZE;
67720@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
67721 * /sys/module/foo/sections stuff
67722 * J. Corbet <corbet@lwn.net>
67723 */
67724-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
67725+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67726
67727 static inline bool sect_empty(const Elf_Shdr *sect)
67728 {
67729@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
67730 destroy_params(mod->kp, mod->num_kp);
67731
67732 /* This may be NULL, but that's OK */
67733- module_free(mod, mod->module_init);
67734+ module_free(mod, mod->module_init_rw);
67735+ module_free_exec(mod, mod->module_init_rx);
67736 kfree(mod->args);
67737 if (mod->percpu)
67738 percpu_modfree(mod->percpu);
67739@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
67740 percpu_modfree(mod->refptr);
67741 #endif
67742 /* Free lock-classes: */
67743- lockdep_free_key_range(mod->module_core, mod->core_size);
67744+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67745+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67746
67747 /* Finally, free the core (containing the module structure) */
67748- module_free(mod, mod->module_core);
67749+ module_free_exec(mod, mod->module_core_rx);
67750+ module_free(mod, mod->module_core_rw);
67751
67752 #ifdef CONFIG_MPU
67753 update_protections(current->mm);
67754@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
67755 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
67756 int ret = 0;
67757 const struct kernel_symbol *ksym;
67758+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67759+ int is_fs_load = 0;
67760+ int register_filesystem_found = 0;
67761+ char *p;
67762+
67763+ p = strstr(mod->args, "grsec_modharden_fs");
67764+
67765+ if (p) {
67766+ char *endptr = p + strlen("grsec_modharden_fs");
67767+ /* copy \0 as well */
67768+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67769+ is_fs_load = 1;
67770+ }
67771+#endif
67772+
67773
67774 for (i = 1; i < n; i++) {
67775+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67776+ const char *name = strtab + sym[i].st_name;
67777+
67778+ /* it's a real shame this will never get ripped and copied
67779+ upstream! ;(
67780+ */
67781+ if (is_fs_load && !strcmp(name, "register_filesystem"))
67782+ register_filesystem_found = 1;
67783+#endif
67784 switch (sym[i].st_shndx) {
67785 case SHN_COMMON:
67786 /* We compiled with -fno-common. These are not
67787@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
67788 strtab + sym[i].st_name, mod);
67789 /* Ok if resolved. */
67790 if (ksym) {
67791+ pax_open_kernel();
67792 sym[i].st_value = ksym->value;
67793+ pax_close_kernel();
67794 break;
67795 }
67796
67797@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
67798 secbase = (unsigned long)mod->percpu;
67799 else
67800 secbase = sechdrs[sym[i].st_shndx].sh_addr;
67801+ pax_open_kernel();
67802 sym[i].st_value += secbase;
67803+ pax_close_kernel();
67804 break;
67805 }
67806 }
67807
67808+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67809+ if (is_fs_load && !register_filesystem_found) {
67810+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67811+ ret = -EPERM;
67812+ }
67813+#endif
67814+
67815 return ret;
67816 }
67817
67818@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
67819 || s->sh_entsize != ~0UL
67820 || strstarts(secstrings + s->sh_name, ".init"))
67821 continue;
67822- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67823+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67824+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67825+ else
67826+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67827 DEBUGP("\t%s\n", secstrings + s->sh_name);
67828 }
67829- if (m == 0)
67830- mod->core_text_size = mod->core_size;
67831 }
67832
67833 DEBUGP("Init section allocation order:\n");
67834@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
67835 || s->sh_entsize != ~0UL
67836 || !strstarts(secstrings + s->sh_name, ".init"))
67837 continue;
67838- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67839- | INIT_OFFSET_MASK);
67840+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67841+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67842+ else
67843+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67844+ s->sh_entsize |= INIT_OFFSET_MASK;
67845 DEBUGP("\t%s\n", secstrings + s->sh_name);
67846 }
67847- if (m == 0)
67848- mod->init_text_size = mod->init_size;
67849 }
67850 }
67851
67852@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
67853
67854 /* As per nm */
67855 static char elf_type(const Elf_Sym *sym,
67856- Elf_Shdr *sechdrs,
67857- const char *secstrings,
67858- struct module *mod)
67859+ const Elf_Shdr *sechdrs,
67860+ const char *secstrings)
67861 {
67862 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
67863 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
67864@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
67865
67866 /* Put symbol section at end of init part of module. */
67867 symsect->sh_flags |= SHF_ALLOC;
67868- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67869+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67870 symindex) | INIT_OFFSET_MASK;
67871 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
67872
67873@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
67874 }
67875
67876 /* Append room for core symbols at end of core part. */
67877- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67878- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
67879+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67880+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
67881
67882 /* Put string table section at end of init part of module. */
67883 strsect->sh_flags |= SHF_ALLOC;
67884- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67885+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67886 strindex) | INIT_OFFSET_MASK;
67887 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
67888
67889 /* Append room for core symbols' strings at end of core part. */
67890- *pstroffs = mod->core_size;
67891+ *pstroffs = mod->core_size_rx;
67892 __set_bit(0, strmap);
67893- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
67894+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
67895
67896 return symoffs;
67897 }
67898@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
67899 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
67900 mod->strtab = (void *)sechdrs[strindex].sh_addr;
67901
67902+ pax_open_kernel();
67903+
67904 /* Set types up while we still have access to sections. */
67905 for (i = 0; i < mod->num_symtab; i++)
67906 mod->symtab[i].st_info
67907- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
67908+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
67909
67910- mod->core_symtab = dst = mod->module_core + symoffs;
67911+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
67912 src = mod->symtab;
67913 *dst = *src;
67914 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
67915@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
67916 }
67917 mod->core_num_syms = ndst;
67918
67919- mod->core_strtab = s = mod->module_core + stroffs;
67920+ mod->core_strtab = s = mod->module_core_rx + stroffs;
67921 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
67922 if (test_bit(i, strmap))
67923 *++s = mod->strtab[i];
67924+
67925+ pax_close_kernel();
67926 }
67927 #else
67928 static inline unsigned long layout_symtab(struct module *mod,
67929@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
67930 #endif
67931 }
67932
67933-static void *module_alloc_update_bounds(unsigned long size)
67934+static void *module_alloc_update_bounds_rw(unsigned long size)
67935 {
67936 void *ret = module_alloc(size);
67937
67938 if (ret) {
67939 /* Update module bounds. */
67940- if ((unsigned long)ret < module_addr_min)
67941- module_addr_min = (unsigned long)ret;
67942- if ((unsigned long)ret + size > module_addr_max)
67943- module_addr_max = (unsigned long)ret + size;
67944+ if ((unsigned long)ret < module_addr_min_rw)
67945+ module_addr_min_rw = (unsigned long)ret;
67946+ if ((unsigned long)ret + size > module_addr_max_rw)
67947+ module_addr_max_rw = (unsigned long)ret + size;
67948+ }
67949+ return ret;
67950+}
67951+
67952+static void *module_alloc_update_bounds_rx(unsigned long size)
67953+{
67954+ void *ret = module_alloc_exec(size);
67955+
67956+ if (ret) {
67957+ /* Update module bounds. */
67958+ if ((unsigned long)ret < module_addr_min_rx)
67959+ module_addr_min_rx = (unsigned long)ret;
67960+ if ((unsigned long)ret + size > module_addr_max_rx)
67961+ module_addr_max_rx = (unsigned long)ret + size;
67962 }
67963 return ret;
67964 }
67965@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
67966 unsigned int i;
67967
67968 /* only scan the sections containing data */
67969- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
67970- (unsigned long)mod->module_core,
67971+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
67972+ (unsigned long)mod->module_core_rw,
67973 sizeof(struct module), GFP_KERNEL);
67974
67975 for (i = 1; i < hdr->e_shnum; i++) {
67976@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
67977 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
67978 continue;
67979
67980- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
67981- (unsigned long)mod->module_core,
67982+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
67983+ (unsigned long)mod->module_core_rw,
67984 sechdrs[i].sh_size, GFP_KERNEL);
67985 }
67986 }
67987@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
67988 secstrings, &stroffs, strmap);
67989
67990 /* Do the allocs. */
67991- ptr = module_alloc_update_bounds(mod->core_size);
67992+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67993 /*
67994 * The pointer to this block is stored in the module structure
67995 * which is inside the block. Just mark it as not being a
67996@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
67997 err = -ENOMEM;
67998 goto free_percpu;
67999 }
68000- memset(ptr, 0, mod->core_size);
68001- mod->module_core = ptr;
68002+ memset(ptr, 0, mod->core_size_rw);
68003+ mod->module_core_rw = ptr;
68004
68005- ptr = module_alloc_update_bounds(mod->init_size);
68006+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68007 /*
68008 * The pointer to this block is stored in the module structure
68009 * which is inside the block. This block doesn't need to be
68010 * scanned as it contains data and code that will be freed
68011 * after the module is initialized.
68012 */
68013- kmemleak_ignore(ptr);
68014- if (!ptr && mod->init_size) {
68015+ kmemleak_not_leak(ptr);
68016+ if (!ptr && mod->init_size_rw) {
68017+ err = -ENOMEM;
68018+ goto free_core_rw;
68019+ }
68020+ memset(ptr, 0, mod->init_size_rw);
68021+ mod->module_init_rw = ptr;
68022+
68023+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68024+ kmemleak_not_leak(ptr);
68025+ if (!ptr) {
68026 err = -ENOMEM;
68027- goto free_core;
68028+ goto free_init_rw;
68029 }
68030- memset(ptr, 0, mod->init_size);
68031- mod->module_init = ptr;
68032+
68033+ pax_open_kernel();
68034+ memset(ptr, 0, mod->core_size_rx);
68035+ pax_close_kernel();
68036+ mod->module_core_rx = ptr;
68037+
68038+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68039+ kmemleak_not_leak(ptr);
68040+ if (!ptr && mod->init_size_rx) {
68041+ err = -ENOMEM;
68042+ goto free_core_rx;
68043+ }
68044+
68045+ pax_open_kernel();
68046+ memset(ptr, 0, mod->init_size_rx);
68047+ pax_close_kernel();
68048+ mod->module_init_rx = ptr;
68049
68050 /* Transfer each section which specifies SHF_ALLOC */
68051 DEBUGP("final section addresses:\n");
68052@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
68053 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
68054 continue;
68055
68056- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
68057- dest = mod->module_init
68058- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
68059- else
68060- dest = mod->module_core + sechdrs[i].sh_entsize;
68061+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
68062+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
68063+ dest = mod->module_init_rw
68064+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
68065+ else
68066+ dest = mod->module_init_rx
68067+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
68068+ } else {
68069+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
68070+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
68071+ else
68072+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
68073+ }
68074+
68075+ if (sechdrs[i].sh_type != SHT_NOBITS) {
68076
68077- if (sechdrs[i].sh_type != SHT_NOBITS)
68078- memcpy(dest, (void *)sechdrs[i].sh_addr,
68079- sechdrs[i].sh_size);
68080+#ifdef CONFIG_PAX_KERNEXEC
68081+#ifdef CONFIG_X86_64
68082+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
68083+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68084+#endif
68085+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
68086+ pax_open_kernel();
68087+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
68088+ pax_close_kernel();
68089+ } else
68090+#endif
68091+
68092+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
68093+ }
68094 /* Update sh_addr to point to copy in image. */
68095- sechdrs[i].sh_addr = (unsigned long)dest;
68096+
68097+#ifdef CONFIG_PAX_KERNEXEC
68098+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
68099+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
68100+ else
68101+#endif
68102+
68103+ sechdrs[i].sh_addr = (unsigned long)dest;
68104 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
68105 }
68106 /* Module has been moved. */
68107@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
68108 mod->name);
68109 if (!mod->refptr) {
68110 err = -ENOMEM;
68111- goto free_init;
68112+ goto free_init_rx;
68113 }
68114 #endif
68115 /* Now we've moved module, initialize linked lists, etc. */
68116@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
68117 /* Set up MODINFO_ATTR fields */
68118 setup_modinfo(mod, sechdrs, infoindex);
68119
68120+ mod->args = args;
68121+
68122+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68123+ {
68124+ char *p, *p2;
68125+
68126+ if (strstr(mod->args, "grsec_modharden_netdev")) {
68127+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68128+ err = -EPERM;
68129+ goto cleanup;
68130+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68131+ p += strlen("grsec_modharden_normal");
68132+ p2 = strstr(p, "_");
68133+ if (p2) {
68134+ *p2 = '\0';
68135+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68136+ *p2 = '_';
68137+ }
68138+ err = -EPERM;
68139+ goto cleanup;
68140+ }
68141+ }
68142+#endif
68143+
68144+
68145 /* Fix up syms, so that st_value is a pointer to location. */
68146 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
68147 mod);
68148@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
68149
68150 /* Now do relocations. */
68151 for (i = 1; i < hdr->e_shnum; i++) {
68152- const char *strtab = (char *)sechdrs[strindex].sh_addr;
68153 unsigned int info = sechdrs[i].sh_info;
68154+ strtab = (char *)sechdrs[strindex].sh_addr;
68155
68156 /* Not a valid relocation section? */
68157 if (info >= hdr->e_shnum)
68158@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
68159 * Do it before processing of module parameters, so the module
68160 * can provide parameter accessor functions of its own.
68161 */
68162- if (mod->module_init)
68163- flush_icache_range((unsigned long)mod->module_init,
68164- (unsigned long)mod->module_init
68165- + mod->init_size);
68166- flush_icache_range((unsigned long)mod->module_core,
68167- (unsigned long)mod->module_core + mod->core_size);
68168+ if (mod->module_init_rx)
68169+ flush_icache_range((unsigned long)mod->module_init_rx,
68170+ (unsigned long)mod->module_init_rx
68171+ + mod->init_size_rx);
68172+ flush_icache_range((unsigned long)mod->module_core_rx,
68173+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
68174
68175 set_fs(old_fs);
68176
68177- mod->args = args;
68178 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
68179 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
68180 mod->name);
68181@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
68182 free_unload:
68183 module_unload_free(mod);
68184 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
68185+ free_init_rx:
68186 percpu_modfree(mod->refptr);
68187- free_init:
68188 #endif
68189- module_free(mod, mod->module_init);
68190- free_core:
68191- module_free(mod, mod->module_core);
68192+ module_free_exec(mod, mod->module_init_rx);
68193+ free_core_rx:
68194+ module_free_exec(mod, mod->module_core_rx);
68195+ free_init_rw:
68196+ module_free(mod, mod->module_init_rw);
68197+ free_core_rw:
68198+ module_free(mod, mod->module_core_rw);
68199 /* mod will be freed with core. Don't access it beyond this line! */
68200 free_percpu:
68201 if (percpu)
68202@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
68203 mod->symtab = mod->core_symtab;
68204 mod->strtab = mod->core_strtab;
68205 #endif
68206- module_free(mod, mod->module_init);
68207- mod->module_init = NULL;
68208- mod->init_size = 0;
68209- mod->init_text_size = 0;
68210+ module_free(mod, mod->module_init_rw);
68211+ module_free_exec(mod, mod->module_init_rx);
68212+ mod->module_init_rw = NULL;
68213+ mod->module_init_rx = NULL;
68214+ mod->init_size_rw = 0;
68215+ mod->init_size_rx = 0;
68216 mutex_unlock(&module_mutex);
68217
68218 return 0;
68219@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
68220 unsigned long nextval;
68221
68222 /* At worse, next value is at end of module */
68223- if (within_module_init(addr, mod))
68224- nextval = (unsigned long)mod->module_init+mod->init_text_size;
68225+ if (within_module_init_rx(addr, mod))
68226+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68227+ else if (within_module_init_rw(addr, mod))
68228+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68229+ else if (within_module_core_rx(addr, mod))
68230+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68231+ else if (within_module_core_rw(addr, mod))
68232+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68233 else
68234- nextval = (unsigned long)mod->module_core+mod->core_text_size;
68235+ return NULL;
68236
68237 /* Scan for closest preceeding symbol, and next symbol. (ELF
68238 starts real symbols at 1). */
68239@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
68240 char buf[8];
68241
68242 seq_printf(m, "%s %u",
68243- mod->name, mod->init_size + mod->core_size);
68244+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68245 print_unload_info(m, mod);
68246
68247 /* Informative for users. */
68248@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
68249 mod->state == MODULE_STATE_COMING ? "Loading":
68250 "Live");
68251 /* Used by oprofile and other similar tools. */
68252- seq_printf(m, " 0x%p", mod->module_core);
68253+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
68254
68255 /* Taints info */
68256 if (mod->taints)
68257@@ -2981,7 +3128,17 @@ static const struct file_operations proc
68258
68259 static int __init proc_modules_init(void)
68260 {
68261+#ifndef CONFIG_GRKERNSEC_HIDESYM
68262+#ifdef CONFIG_GRKERNSEC_PROC_USER
68263+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68264+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68265+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68266+#else
68267 proc_create("modules", 0, NULL, &proc_modules_operations);
68268+#endif
68269+#else
68270+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68271+#endif
68272 return 0;
68273 }
68274 module_init(proc_modules_init);
68275@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
68276 {
68277 struct module *mod;
68278
68279- if (addr < module_addr_min || addr > module_addr_max)
68280+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68281+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
68282 return NULL;
68283
68284 list_for_each_entry_rcu(mod, &modules, list)
68285- if (within_module_core(addr, mod)
68286- || within_module_init(addr, mod))
68287+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
68288 return mod;
68289 return NULL;
68290 }
68291@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
68292 */
68293 struct module *__module_text_address(unsigned long addr)
68294 {
68295- struct module *mod = __module_address(addr);
68296+ struct module *mod;
68297+
68298+#ifdef CONFIG_X86_32
68299+ addr = ktla_ktva(addr);
68300+#endif
68301+
68302+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68303+ return NULL;
68304+
68305+ mod = __module_address(addr);
68306+
68307 if (mod) {
68308 /* Make sure it's within the text section. */
68309- if (!within(addr, mod->module_init, mod->init_text_size)
68310- && !within(addr, mod->module_core, mod->core_text_size))
68311+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68312 mod = NULL;
68313 }
68314 return mod;
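
The kernel/module.c hunks above replace the single module_core/module_init mappings with separate RW and RX regions: sections that are writable (SHF_WRITE) or not SHF_ALLOC are laid out into core_size_rw/init_size_rw, everything else into core_size_rx/init_size_rx, and writes into the RX copies are bracketed by pax_open_kernel()/pax_close_kernel(). The user-space sketch below only models that allocation split; the struct and helper names are hypothetical stand-ins, not the patch's identifiers.

    #include <stdlib.h>
    #include <string.h>

    struct mod_layout {
        void *core_rw, *core_rx;            /* writable data vs. read-only/executable text */
        size_t core_size_rw, core_size_rx;
    };

    /* Stand-ins for module_alloc() and module_alloc_exec(). */
    static void *alloc_rw(size_t n) { return n ? malloc(n) : NULL; }
    static void *alloc_rx(size_t n) { return n ? malloc(n) : NULL; }

    static int mod_layout_alloc(struct mod_layout *l)
    {
        l->core_rw = alloc_rw(l->core_size_rw);
        if (!l->core_rw && l->core_size_rw)
            return -1;
        l->core_rx = alloc_rx(l->core_size_rx);
        if (!l->core_rx && l->core_size_rx) {
            free(l->core_rw);
            return -1;
        }
        if (l->core_size_rw)
            memset(l->core_rw, 0, l->core_size_rw);
        if (l->core_size_rx)
            memset(l->core_rx, 0, l->core_size_rx);   /* in the patch this write sits between
                                                         pax_open_kernel() and pax_close_kernel() */
        return 0;
    }

Each section is then copied into whichever region matches its flags, mirroring the SHF_WRITE test used in layout_sections() and load_module() above.
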
68315diff -urNp linux-2.6.32.48/kernel/mutex.c linux-2.6.32.48/kernel/mutex.c
68316--- linux-2.6.32.48/kernel/mutex.c 2011-11-08 19:02:43.000000000 -0500
68317+++ linux-2.6.32.48/kernel/mutex.c 2011-11-15 19:59:43.000000000 -0500
68318@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
68319 */
68320
68321 for (;;) {
68322- struct thread_info *owner;
68323+ struct task_struct *owner;
68324
68325 /*
68326 * If we own the BKL, then don't spin. The owner of
68327@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
68328 spin_lock_mutex(&lock->wait_lock, flags);
68329
68330 debug_mutex_lock_common(lock, &waiter);
68331- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68332+ debug_mutex_add_waiter(lock, &waiter, task);
68333
68334 /* add waiting tasks to the end of the waitqueue (FIFO): */
68335 list_add_tail(&waiter.list, &lock->wait_list);
68336@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
68337 * TASK_UNINTERRUPTIBLE case.)
68338 */
68339 if (unlikely(signal_pending_state(state, task))) {
68340- mutex_remove_waiter(lock, &waiter,
68341- task_thread_info(task));
68342+ mutex_remove_waiter(lock, &waiter, task);
68343 mutex_release(&lock->dep_map, 1, ip);
68344 spin_unlock_mutex(&lock->wait_lock, flags);
68345
68346@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
68347 done:
68348 lock_acquired(&lock->dep_map, ip);
68349 /* got the lock - rejoice! */
68350- mutex_remove_waiter(lock, &waiter, current_thread_info());
68351+ mutex_remove_waiter(lock, &waiter, task);
68352 mutex_set_owner(lock);
68353
68354 /* set it to 0 if there are no waiters left: */
68355diff -urNp linux-2.6.32.48/kernel/mutex-debug.c linux-2.6.32.48/kernel/mutex-debug.c
68356--- linux-2.6.32.48/kernel/mutex-debug.c 2011-11-08 19:02:43.000000000 -0500
68357+++ linux-2.6.32.48/kernel/mutex-debug.c 2011-11-15 19:59:43.000000000 -0500
68358@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
68359 }
68360
68361 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68362- struct thread_info *ti)
68363+ struct task_struct *task)
68364 {
68365 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68366
68367 /* Mark the current thread as blocked on the lock: */
68368- ti->task->blocked_on = waiter;
68369+ task->blocked_on = waiter;
68370 }
68371
68372 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68373- struct thread_info *ti)
68374+ struct task_struct *task)
68375 {
68376 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68377- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68378- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68379- ti->task->blocked_on = NULL;
68380+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
68381+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68382+ task->blocked_on = NULL;
68383
68384 list_del_init(&waiter->list);
68385 waiter->task = NULL;
68386@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
68387 return;
68388
68389 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
68390- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
68391+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
68392 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
68393 mutex_clear_owner(lock);
68394 }
68395diff -urNp linux-2.6.32.48/kernel/mutex-debug.h linux-2.6.32.48/kernel/mutex-debug.h
68396--- linux-2.6.32.48/kernel/mutex-debug.h 2011-11-08 19:02:43.000000000 -0500
68397+++ linux-2.6.32.48/kernel/mutex-debug.h 2011-11-15 19:59:43.000000000 -0500
68398@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
68399 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68400 extern void debug_mutex_add_waiter(struct mutex *lock,
68401 struct mutex_waiter *waiter,
68402- struct thread_info *ti);
68403+ struct task_struct *task);
68404 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68405- struct thread_info *ti);
68406+ struct task_struct *task);
68407 extern void debug_mutex_unlock(struct mutex *lock);
68408 extern void debug_mutex_init(struct mutex *lock, const char *name,
68409 struct lock_class_key *key);
68410
68411 static inline void mutex_set_owner(struct mutex *lock)
68412 {
68413- lock->owner = current_thread_info();
68414+ lock->owner = current;
68415 }
68416
68417 static inline void mutex_clear_owner(struct mutex *lock)
68418diff -urNp linux-2.6.32.48/kernel/mutex.h linux-2.6.32.48/kernel/mutex.h
68419--- linux-2.6.32.48/kernel/mutex.h 2011-11-08 19:02:43.000000000 -0500
68420+++ linux-2.6.32.48/kernel/mutex.h 2011-11-15 19:59:43.000000000 -0500
68421@@ -19,7 +19,7 @@
68422 #ifdef CONFIG_SMP
68423 static inline void mutex_set_owner(struct mutex *lock)
68424 {
68425- lock->owner = current_thread_info();
68426+ lock->owner = current;
68427 }
68428
68429 static inline void mutex_clear_owner(struct mutex *lock)
68430diff -urNp linux-2.6.32.48/kernel/panic.c linux-2.6.32.48/kernel/panic.c
68431--- linux-2.6.32.48/kernel/panic.c 2011-11-08 19:02:43.000000000 -0500
68432+++ linux-2.6.32.48/kernel/panic.c 2011-11-15 19:59:43.000000000 -0500
68433@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
68434 const char *board;
68435
68436 printk(KERN_WARNING "------------[ cut here ]------------\n");
68437- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68438+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68439 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68440 if (board)
68441 printk(KERN_WARNING "Hardware name: %s\n", board);
68442@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68443 */
68444 void __stack_chk_fail(void)
68445 {
68446- panic("stack-protector: Kernel stack is corrupted in: %p\n",
68447+ dump_stack();
68448+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68449 __builtin_return_address(0));
68450 }
68451 EXPORT_SYMBOL(__stack_chk_fail);
68452diff -urNp linux-2.6.32.48/kernel/params.c linux-2.6.32.48/kernel/params.c
68453--- linux-2.6.32.48/kernel/params.c 2011-11-08 19:02:43.000000000 -0500
68454+++ linux-2.6.32.48/kernel/params.c 2011-11-15 19:59:43.000000000 -0500
68455@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
68456 return ret;
68457 }
68458
68459-static struct sysfs_ops module_sysfs_ops = {
68460+static const struct sysfs_ops module_sysfs_ops = {
68461 .show = module_attr_show,
68462 .store = module_attr_store,
68463 };
68464@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
68465 return 0;
68466 }
68467
68468-static struct kset_uevent_ops module_uevent_ops = {
68469+static const struct kset_uevent_ops module_uevent_ops = {
68470 .filter = uevent_filter,
68471 };
68472
68473diff -urNp linux-2.6.32.48/kernel/perf_event.c linux-2.6.32.48/kernel/perf_event.c
68474--- linux-2.6.32.48/kernel/perf_event.c 2011-11-08 19:02:43.000000000 -0500
68475+++ linux-2.6.32.48/kernel/perf_event.c 2011-11-15 19:59:43.000000000 -0500
68476@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
68477 */
68478 int sysctl_perf_event_sample_rate __read_mostly = 100000;
68479
68480-static atomic64_t perf_event_id;
68481+static atomic64_unchecked_t perf_event_id;
68482
68483 /*
68484 * Lock for (sysadmin-configurable) event reservations:
68485@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
68486 * In order to keep per-task stats reliable we need to flip the event
68487 * values when we flip the contexts.
68488 */
68489- value = atomic64_read(&next_event->count);
68490- value = atomic64_xchg(&event->count, value);
68491- atomic64_set(&next_event->count, value);
68492+ value = atomic64_read_unchecked(&next_event->count);
68493+ value = atomic64_xchg_unchecked(&event->count, value);
68494+ atomic64_set_unchecked(&next_event->count, value);
68495
68496 swap(event->total_time_enabled, next_event->total_time_enabled);
68497 swap(event->total_time_running, next_event->total_time_running);
68498@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
68499 update_event_times(event);
68500 }
68501
68502- return atomic64_read(&event->count);
68503+ return atomic64_read_unchecked(&event->count);
68504 }
68505
68506 /*
68507@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
68508 values[n++] = 1 + leader->nr_siblings;
68509 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
68510 values[n++] = leader->total_time_enabled +
68511- atomic64_read(&leader->child_total_time_enabled);
68512+ atomic64_read_unchecked(&leader->child_total_time_enabled);
68513 }
68514 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
68515 values[n++] = leader->total_time_running +
68516- atomic64_read(&leader->child_total_time_running);
68517+ atomic64_read_unchecked(&leader->child_total_time_running);
68518 }
68519
68520 size = n * sizeof(u64);
68521@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
68522 values[n++] = perf_event_read_value(event);
68523 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
68524 values[n++] = event->total_time_enabled +
68525- atomic64_read(&event->child_total_time_enabled);
68526+ atomic64_read_unchecked(&event->child_total_time_enabled);
68527 }
68528 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
68529 values[n++] = event->total_time_running +
68530- atomic64_read(&event->child_total_time_running);
68531+ atomic64_read_unchecked(&event->child_total_time_running);
68532 }
68533 if (read_format & PERF_FORMAT_ID)
68534 values[n++] = primary_event_id(event);
68535@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
68536 static void perf_event_reset(struct perf_event *event)
68537 {
68538 (void)perf_event_read(event);
68539- atomic64_set(&event->count, 0);
68540+ atomic64_set_unchecked(&event->count, 0);
68541 perf_event_update_userpage(event);
68542 }
68543
68544@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
68545 ++userpg->lock;
68546 barrier();
68547 userpg->index = perf_event_index(event);
68548- userpg->offset = atomic64_read(&event->count);
68549+ userpg->offset = atomic64_read_unchecked(&event->count);
68550 if (event->state == PERF_EVENT_STATE_ACTIVE)
68551- userpg->offset -= atomic64_read(&event->hw.prev_count);
68552+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
68553
68554 userpg->time_enabled = event->total_time_enabled +
68555- atomic64_read(&event->child_total_time_enabled);
68556+ atomic64_read_unchecked(&event->child_total_time_enabled);
68557
68558 userpg->time_running = event->total_time_running +
68559- atomic64_read(&event->child_total_time_running);
68560+ atomic64_read_unchecked(&event->child_total_time_running);
68561
68562 barrier();
68563 ++userpg->lock;
68564@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
68565 u64 values[4];
68566 int n = 0;
68567
68568- values[n++] = atomic64_read(&event->count);
68569+ values[n++] = atomic64_read_unchecked(&event->count);
68570 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
68571 values[n++] = event->total_time_enabled +
68572- atomic64_read(&event->child_total_time_enabled);
68573+ atomic64_read_unchecked(&event->child_total_time_enabled);
68574 }
68575 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
68576 values[n++] = event->total_time_running +
68577- atomic64_read(&event->child_total_time_running);
68578+ atomic64_read_unchecked(&event->child_total_time_running);
68579 }
68580 if (read_format & PERF_FORMAT_ID)
68581 values[n++] = primary_event_id(event);
68582@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
68583 if (leader != event)
68584 leader->pmu->read(leader);
68585
68586- values[n++] = atomic64_read(&leader->count);
68587+ values[n++] = atomic64_read_unchecked(&leader->count);
68588 if (read_format & PERF_FORMAT_ID)
68589 values[n++] = primary_event_id(leader);
68590
68591@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
68592 if (sub != event)
68593 sub->pmu->read(sub);
68594
68595- values[n++] = atomic64_read(&sub->count);
68596+ values[n++] = atomic64_read_unchecked(&sub->count);
68597 if (read_format & PERF_FORMAT_ID)
68598 values[n++] = primary_event_id(sub);
68599
68600@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct
68601 * need to add enough zero bytes after the string to handle
68602 * the 64bit alignment we do later.
68603 */
68604- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
68605+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
68606 if (!buf) {
68607 name = strncpy(tmp, "//enomem", sizeof(tmp));
68608 goto got_name;
68609 }
68610- name = d_path(&file->f_path, buf, PATH_MAX);
68611+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
68612 if (IS_ERR(name)) {
68613 name = strncpy(tmp, "//toolong", sizeof(tmp));
68614 goto got_name;
68615@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
68616 {
68617 struct hw_perf_event *hwc = &event->hw;
68618
68619- atomic64_add(nr, &event->count);
68620+ atomic64_add_unchecked(nr, &event->count);
68621
68622 if (!hwc->sample_period)
68623 return;
68624@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
68625 u64 now;
68626
68627 now = cpu_clock(cpu);
68628- prev = atomic64_read(&event->hw.prev_count);
68629- atomic64_set(&event->hw.prev_count, now);
68630- atomic64_add(now - prev, &event->count);
68631+ prev = atomic64_read_unchecked(&event->hw.prev_count);
68632+ atomic64_set_unchecked(&event->hw.prev_count, now);
68633+ atomic64_add_unchecked(now - prev, &event->count);
68634 }
68635
68636 static int cpu_clock_perf_event_enable(struct perf_event *event)
68637@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
68638 struct hw_perf_event *hwc = &event->hw;
68639 int cpu = raw_smp_processor_id();
68640
68641- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
68642+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
68643 perf_swevent_start_hrtimer(event);
68644
68645 return 0;
68646@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
68647 u64 prev;
68648 s64 delta;
68649
68650- prev = atomic64_xchg(&event->hw.prev_count, now);
68651+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
68652 delta = now - prev;
68653- atomic64_add(delta, &event->count);
68654+ atomic64_add_unchecked(delta, &event->count);
68655 }
68656
68657 static int task_clock_perf_event_enable(struct perf_event *event)
68658@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
68659
68660 now = event->ctx->time;
68661
68662- atomic64_set(&hwc->prev_count, now);
68663+ atomic64_set_unchecked(&hwc->prev_count, now);
68664
68665 perf_swevent_start_hrtimer(event);
68666
68667@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
68668 event->parent = parent_event;
68669
68670 event->ns = get_pid_ns(current->nsproxy->pid_ns);
68671- event->id = atomic64_inc_return(&perf_event_id);
68672+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
68673
68674 event->state = PERF_EVENT_STATE_INACTIVE;
68675
68676@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
68677 if (child_event->attr.inherit_stat)
68678 perf_event_read_event(child_event, child);
68679
68680- child_val = atomic64_read(&child_event->count);
68681+ child_val = atomic64_read_unchecked(&child_event->count);
68682
68683 /*
68684 * Add back the child's count to the parent's count:
68685 */
68686- atomic64_add(child_val, &parent_event->count);
68687- atomic64_add(child_event->total_time_enabled,
68688+ atomic64_add_unchecked(child_val, &parent_event->count);
68689+ atomic64_add_unchecked(child_event->total_time_enabled,
68690 &parent_event->child_total_time_enabled);
68691- atomic64_add(child_event->total_time_running,
68692+ atomic64_add_unchecked(child_event->total_time_running,
68693 &parent_event->child_total_time_running);
68694
68695 /*
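
The kernel/perf_event.c hunks convert the event counters from atomic64_t to atomic64_unchecked_t. In the PaX refcount-hardening model the plain type is overflow-checked, while the *_unchecked variant marks free-running statistics that may legitimately wrap and must not trap. A rough user-space analogue of keeping the two kinds of counter side by side (names hypothetical, GCC/Clang builtins assumed) is:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { _Atomic int64_t v; } counter_t;            /* checked: wrap is a bug   */
    typedef struct { _Atomic int64_t v; } counter_unchecked_t;  /* free-running statistic   */

    static int64_t counter_add(counter_t *c, int64_t n)
    {
        int64_t old = atomic_fetch_add_explicit(&c->v, n, memory_order_relaxed);
        int64_t sum;
        if (__builtin_add_overflow(old, n, &sum))
            abort();    /* crude stand-in for the kernel's refcount overflow trap */
        return sum;
    }

    static int64_t counter_add_unchecked(counter_unchecked_t *c, int64_t n)
    {
        return atomic_fetch_add_explicit(&c->v, n, memory_order_relaxed) + n;
    }
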
68696diff -urNp linux-2.6.32.48/kernel/pid.c linux-2.6.32.48/kernel/pid.c
68697--- linux-2.6.32.48/kernel/pid.c 2011-11-08 19:02:43.000000000 -0500
68698+++ linux-2.6.32.48/kernel/pid.c 2011-11-15 19:59:43.000000000 -0500
68699@@ -33,6 +33,7 @@
68700 #include <linux/rculist.h>
68701 #include <linux/bootmem.h>
68702 #include <linux/hash.h>
68703+#include <linux/security.h>
68704 #include <linux/pid_namespace.h>
68705 #include <linux/init_task.h>
68706 #include <linux/syscalls.h>
68707@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
68708
68709 int pid_max = PID_MAX_DEFAULT;
68710
68711-#define RESERVED_PIDS 300
68712+#define RESERVED_PIDS 500
68713
68714 int pid_max_min = RESERVED_PIDS + 1;
68715 int pid_max_max = PID_MAX_LIMIT;
68716@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
68717 */
68718 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68719 {
68720- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68721+ struct task_struct *task;
68722+
68723+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68724+
68725+ if (gr_pid_is_chrooted(task))
68726+ return NULL;
68727+
68728+ return task;
68729 }
68730
68731 struct task_struct *find_task_by_vpid(pid_t vnr)
68732@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
68733 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68734 }
68735
68736+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68737+{
68738+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68739+}
68740+
68741 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68742 {
68743 struct pid *pid;
68744diff -urNp linux-2.6.32.48/kernel/posix-cpu-timers.c linux-2.6.32.48/kernel/posix-cpu-timers.c
68745--- linux-2.6.32.48/kernel/posix-cpu-timers.c 2011-11-08 19:02:43.000000000 -0500
68746+++ linux-2.6.32.48/kernel/posix-cpu-timers.c 2011-11-15 19:59:43.000000000 -0500
68747@@ -6,6 +6,7 @@
68748 #include <linux/posix-timers.h>
68749 #include <linux/errno.h>
68750 #include <linux/math64.h>
68751+#include <linux/security.h>
68752 #include <asm/uaccess.h>
68753 #include <linux/kernel_stat.h>
68754 #include <trace/events/timer.h>
68755@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
68756
68757 static __init int init_posix_cpu_timers(void)
68758 {
68759- struct k_clock process = {
68760+ static struct k_clock process = {
68761 .clock_getres = process_cpu_clock_getres,
68762 .clock_get = process_cpu_clock_get,
68763 .clock_set = do_posix_clock_nosettime,
68764@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
68765 .nsleep = process_cpu_nsleep,
68766 .nsleep_restart = process_cpu_nsleep_restart,
68767 };
68768- struct k_clock thread = {
68769+ static struct k_clock thread = {
68770 .clock_getres = thread_cpu_clock_getres,
68771 .clock_get = thread_cpu_clock_get,
68772 .clock_set = do_posix_clock_nosettime,
68773diff -urNp linux-2.6.32.48/kernel/posix-timers.c linux-2.6.32.48/kernel/posix-timers.c
68774--- linux-2.6.32.48/kernel/posix-timers.c 2011-11-08 19:02:43.000000000 -0500
68775+++ linux-2.6.32.48/kernel/posix-timers.c 2011-11-15 19:59:43.000000000 -0500
68776@@ -42,6 +42,7 @@
68777 #include <linux/compiler.h>
68778 #include <linux/idr.h>
68779 #include <linux/posix-timers.h>
68780+#include <linux/grsecurity.h>
68781 #include <linux/syscalls.h>
68782 #include <linux/wait.h>
68783 #include <linux/workqueue.h>
68784@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
68785 * which we beg off on and pass to do_sys_settimeofday().
68786 */
68787
68788-static struct k_clock posix_clocks[MAX_CLOCKS];
68789+static struct k_clock *posix_clocks[MAX_CLOCKS];
68790
68791 /*
68792 * These ones are defined below.
68793@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
68794 */
68795 #define CLOCK_DISPATCH(clock, call, arglist) \
68796 ((clock) < 0 ? posix_cpu_##call arglist : \
68797- (posix_clocks[clock].call != NULL \
68798- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
68799+ (posix_clocks[clock]->call != NULL \
68800+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
68801
68802 /*
68803 * Default clock hook functions when the struct k_clock passed
68804@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
68805 struct timespec *tp)
68806 {
68807 tp->tv_sec = 0;
68808- tp->tv_nsec = posix_clocks[which_clock].res;
68809+ tp->tv_nsec = posix_clocks[which_clock]->res;
68810 return 0;
68811 }
68812
68813@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
68814 return 0;
68815 if ((unsigned) which_clock >= MAX_CLOCKS)
68816 return 1;
68817- if (posix_clocks[which_clock].clock_getres != NULL)
68818+ if (posix_clocks[which_clock] == NULL)
68819 return 0;
68820- if (posix_clocks[which_clock].res != 0)
68821+ if (posix_clocks[which_clock]->clock_getres != NULL)
68822+ return 0;
68823+ if (posix_clocks[which_clock]->res != 0)
68824 return 0;
68825 return 1;
68826 }
68827@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
68828 */
68829 static __init int init_posix_timers(void)
68830 {
68831- struct k_clock clock_realtime = {
68832+ static struct k_clock clock_realtime = {
68833 .clock_getres = hrtimer_get_res,
68834 };
68835- struct k_clock clock_monotonic = {
68836+ static struct k_clock clock_monotonic = {
68837 .clock_getres = hrtimer_get_res,
68838 .clock_get = posix_ktime_get_ts,
68839 .clock_set = do_posix_clock_nosettime,
68840 };
68841- struct k_clock clock_monotonic_raw = {
68842+ static struct k_clock clock_monotonic_raw = {
68843 .clock_getres = hrtimer_get_res,
68844 .clock_get = posix_get_monotonic_raw,
68845 .clock_set = do_posix_clock_nosettime,
68846 .timer_create = no_timer_create,
68847 .nsleep = no_nsleep,
68848 };
68849- struct k_clock clock_realtime_coarse = {
68850+ static struct k_clock clock_realtime_coarse = {
68851 .clock_getres = posix_get_coarse_res,
68852 .clock_get = posix_get_realtime_coarse,
68853 .clock_set = do_posix_clock_nosettime,
68854 .timer_create = no_timer_create,
68855 .nsleep = no_nsleep,
68856 };
68857- struct k_clock clock_monotonic_coarse = {
68858+ static struct k_clock clock_monotonic_coarse = {
68859 .clock_getres = posix_get_coarse_res,
68860 .clock_get = posix_get_monotonic_coarse,
68861 .clock_set = do_posix_clock_nosettime,
68862@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
68863 .nsleep = no_nsleep,
68864 };
68865
68866+ pax_track_stack();
68867+
68868 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
68869 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
68870 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
68871@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
68872 return;
68873 }
68874
68875- posix_clocks[clock_id] = *new_clock;
68876+ posix_clocks[clock_id] = new_clock;
68877 }
68878 EXPORT_SYMBOL_GPL(register_posix_clock);
68879
68880@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
68881 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68882 return -EFAULT;
68883
68884+ /* only the CLOCK_REALTIME clock can be set, all other clocks
68885+ have their clock_set fptr set to a nosettime dummy function
68886+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68887+ call common_clock_set, which calls do_sys_settimeofday, which
68888+ we hook
68889+ */
68890+
68891 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
68892 }
68893
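
In kernel/posix-timers.c above, posix_clocks changes from an array of k_clock structs to an array of pointers, the registered clocks become static objects, and the CLOCK_DISPATCH macro is adjusted accordingly, so registration stores a pointer instead of copying caller-provided data. A stripped-down sketch of that table shape, with invented names, looks like:

    #include <stddef.h>

    struct clock_ops {
        int (*getres)(int id, long *res_ns);
        int (*set_time)(int id, long ns);
    };

    #define MAX_CLOCK_IDS 16
    static const struct clock_ops *clock_table[MAX_CLOCK_IDS];

    void register_clock(int id, const struct clock_ops *ops)
    {
        if (id >= 0 && id < MAX_CLOCK_IDS)
            clock_table[id] = ops;              /* store the pointer; no struct copy */
    }

    int clock_getres(int id, long *res_ns)
    {
        if (id < 0 || id >= MAX_CLOCK_IDS || !clock_table[id] || !clock_table[id]->getres)
            return -1;                          /* -EINVAL in kernel terms */
        return clock_table[id]->getres(id, res_ns);
    }

Because only pointers are stored, callers must pass objects with static storage duration, which is why the k_clock locals in init_posix_timers() and init_posix_cpu_timers() are marked static in the hunks above.
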
68894diff -urNp linux-2.6.32.48/kernel/power/hibernate.c linux-2.6.32.48/kernel/power/hibernate.c
68895--- linux-2.6.32.48/kernel/power/hibernate.c 2011-11-08 19:02:43.000000000 -0500
68896+++ linux-2.6.32.48/kernel/power/hibernate.c 2011-11-15 19:59:43.000000000 -0500
68897@@ -48,14 +48,14 @@ enum {
68898
68899 static int hibernation_mode = HIBERNATION_SHUTDOWN;
68900
68901-static struct platform_hibernation_ops *hibernation_ops;
68902+static const struct platform_hibernation_ops *hibernation_ops;
68903
68904 /**
68905 * hibernation_set_ops - set the global hibernate operations
68906 * @ops: the hibernation operations to use in subsequent hibernation transitions
68907 */
68908
68909-void hibernation_set_ops(struct platform_hibernation_ops *ops)
68910+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
68911 {
68912 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
68913 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
68914diff -urNp linux-2.6.32.48/kernel/power/poweroff.c linux-2.6.32.48/kernel/power/poweroff.c
68915--- linux-2.6.32.48/kernel/power/poweroff.c 2011-11-08 19:02:43.000000000 -0500
68916+++ linux-2.6.32.48/kernel/power/poweroff.c 2011-11-15 19:59:43.000000000 -0500
68917@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
68918 .enable_mask = SYSRQ_ENABLE_BOOT,
68919 };
68920
68921-static int pm_sysrq_init(void)
68922+static int __init pm_sysrq_init(void)
68923 {
68924 register_sysrq_key('o', &sysrq_poweroff_op);
68925 return 0;
68926diff -urNp linux-2.6.32.48/kernel/power/process.c linux-2.6.32.48/kernel/power/process.c
68927--- linux-2.6.32.48/kernel/power/process.c 2011-11-08 19:02:43.000000000 -0500
68928+++ linux-2.6.32.48/kernel/power/process.c 2011-11-15 19:59:43.000000000 -0500
68929@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
68930 struct timeval start, end;
68931 u64 elapsed_csecs64;
68932 unsigned int elapsed_csecs;
68933+ bool timedout = false;
68934
68935 do_gettimeofday(&start);
68936
68937 end_time = jiffies + TIMEOUT;
68938 do {
68939 todo = 0;
68940+ if (time_after(jiffies, end_time))
68941+ timedout = true;
68942 read_lock(&tasklist_lock);
68943 do_each_thread(g, p) {
68944 if (frozen(p) || !freezeable(p))
68945@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
68946 * It is "frozen enough". If the task does wake
68947 * up, it will immediately call try_to_freeze.
68948 */
68949- if (!task_is_stopped_or_traced(p) &&
68950- !freezer_should_skip(p))
68951+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
68952 todo++;
68953+ if (timedout) {
68954+ printk(KERN_ERR "Task refusing to freeze:\n");
68955+ sched_show_task(p);
68956+ }
68957+ }
68958 } while_each_thread(g, p);
68959 read_unlock(&tasklist_lock);
68960 yield(); /* Yield is okay here */
68961- if (time_after(jiffies, end_time))
68962- break;
68963- } while (todo);
68964+ } while (todo && !timedout);
68965
68966 do_gettimeofday(&end);
68967 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
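
The kernel/power/process.c hunk restructures try_to_freeze_tasks(): the timeout is detected at the top of each pass, the pass still completes so that any task refusing to freeze can be reported via sched_show_task(), and the loop exits through its condition (todo && !timedout) rather than a mid-loop break. A generic sketch of that loop shape, with hypothetical callbacks:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    bool freeze_all(bool (*still_busy)(int idx), int n, double timeout_s)
    {
        time_t deadline = time(NULL) + (time_t)timeout_s;
        int todo;
        bool timedout = false;

        do {
            if (time(NULL) > deadline)
                timedout = true;            /* note the timeout, but finish this pass */
            todo = 0;
            for (int i = 0; i < n; i++) {
                if (still_busy(i)) {
                    todo++;
                    if (timedout)
                        fprintf(stderr, "task %d refusing to freeze\n", i);
                }
            }
        } while (todo && !timedout);

        return todo == 0;
    }
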
68968diff -urNp linux-2.6.32.48/kernel/power/suspend.c linux-2.6.32.48/kernel/power/suspend.c
68969--- linux-2.6.32.48/kernel/power/suspend.c 2011-11-08 19:02:43.000000000 -0500
68970+++ linux-2.6.32.48/kernel/power/suspend.c 2011-11-15 19:59:43.000000000 -0500
68971@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
68972 [PM_SUSPEND_MEM] = "mem",
68973 };
68974
68975-static struct platform_suspend_ops *suspend_ops;
68976+static const struct platform_suspend_ops *suspend_ops;
68977
68978 /**
68979 * suspend_set_ops - Set the global suspend method table.
68980 * @ops: Pointer to ops structure.
68981 */
68982-void suspend_set_ops(struct platform_suspend_ops *ops)
68983+void suspend_set_ops(const struct platform_suspend_ops *ops)
68984 {
68985 mutex_lock(&pm_mutex);
68986 suspend_ops = ops;
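
Several hunks here (kernel/params.c, kernel/power/hibernate.c, kernel/power/suspend.c) constify function-pointer ops tables and the pointers that hold them, so the tables can be placed in read-only memory. The pattern, reduced to a sketch with made-up names:

    struct suspend_like_ops {
        int  (*begin)(void);
        void (*end)(void);
    };

    static int  do_begin(void) { return 0; }
    static void do_end(void)   { }

    /* const table: eligible for a read-only section */
    static const struct suspend_like_ops my_ops = {
        .begin = do_begin,
        .end   = do_end,
    };

    void set_ops(const struct suspend_like_ops **slot, const struct suspend_like_ops *ops)
    {
        *slot = ops;    /* callers now pass and store const-qualified pointers */
    }
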
68987diff -urNp linux-2.6.32.48/kernel/printk.c linux-2.6.32.48/kernel/printk.c
68988--- linux-2.6.32.48/kernel/printk.c 2011-11-08 19:02:43.000000000 -0500
68989+++ linux-2.6.32.48/kernel/printk.c 2011-11-15 19:59:43.000000000 -0500
68990@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
68991 char c;
68992 int error = 0;
68993
68994+#ifdef CONFIG_GRKERNSEC_DMESG
68995+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
68996+ return -EPERM;
68997+#endif
68998+
68999 error = security_syslog(type);
69000 if (error)
69001 return error;
69002diff -urNp linux-2.6.32.48/kernel/profile.c linux-2.6.32.48/kernel/profile.c
69003--- linux-2.6.32.48/kernel/profile.c 2011-11-08 19:02:43.000000000 -0500
69004+++ linux-2.6.32.48/kernel/profile.c 2011-11-15 19:59:43.000000000 -0500
69005@@ -39,7 +39,7 @@ struct profile_hit {
69006 /* Oprofile timer tick hook */
69007 static int (*timer_hook)(struct pt_regs *) __read_mostly;
69008
69009-static atomic_t *prof_buffer;
69010+static atomic_unchecked_t *prof_buffer;
69011 static unsigned long prof_len, prof_shift;
69012
69013 int prof_on __read_mostly;
69014@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
69015 hits[i].pc = 0;
69016 continue;
69017 }
69018- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69019+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69020 hits[i].hits = hits[i].pc = 0;
69021 }
69022 }
69023@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
69024 * Add the current hit(s) and flush the write-queue out
69025 * to the global buffer:
69026 */
69027- atomic_add(nr_hits, &prof_buffer[pc]);
69028+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
69029 for (i = 0; i < NR_PROFILE_HIT; ++i) {
69030- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69031+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69032 hits[i].pc = hits[i].hits = 0;
69033 }
69034 out:
69035@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
69036 if (prof_on != type || !prof_buffer)
69037 return;
69038 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
69039- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69040+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69041 }
69042 #endif /* !CONFIG_SMP */
69043 EXPORT_SYMBOL_GPL(profile_hits);
69044@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
69045 return -EFAULT;
69046 buf++; p++; count--; read++;
69047 }
69048- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
69049+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
69050 if (copy_to_user(buf, (void *)pnt, count))
69051 return -EFAULT;
69052 read += count;
69053@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
69054 }
69055 #endif
69056 profile_discard_flip_buffers();
69057- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
69058+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
69059 return count;
69060 }
69061
69062diff -urNp linux-2.6.32.48/kernel/ptrace.c linux-2.6.32.48/kernel/ptrace.c
69063--- linux-2.6.32.48/kernel/ptrace.c 2011-11-08 19:02:43.000000000 -0500
69064+++ linux-2.6.32.48/kernel/ptrace.c 2011-11-15 19:59:43.000000000 -0500
69065@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
69066 return ret;
69067 }
69068
69069-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
69070+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
69071+ unsigned int log)
69072 {
69073 const struct cred *cred = current_cred(), *tcred;
69074
69075@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
69076 cred->gid != tcred->egid ||
69077 cred->gid != tcred->sgid ||
69078 cred->gid != tcred->gid) &&
69079- !capable(CAP_SYS_PTRACE)) {
69080+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
69081+ (log && !capable(CAP_SYS_PTRACE)))
69082+ ) {
69083 rcu_read_unlock();
69084 return -EPERM;
69085 }
69086@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
69087 smp_rmb();
69088 if (task->mm)
69089 dumpable = get_dumpable(task->mm);
69090- if (!dumpable && !capable(CAP_SYS_PTRACE))
69091+ if (!dumpable &&
69092+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
69093+ (log && !capable(CAP_SYS_PTRACE))))
69094 return -EPERM;
69095
69096 return security_ptrace_access_check(task, mode);
69097@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
69098 {
69099 int err;
69100 task_lock(task);
69101- err = __ptrace_may_access(task, mode);
69102+ err = __ptrace_may_access(task, mode, 0);
69103+ task_unlock(task);
69104+ return !err;
69105+}
69106+
69107+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
69108+{
69109+ int err;
69110+ task_lock(task);
69111+ err = __ptrace_may_access(task, mode, 1);
69112 task_unlock(task);
69113 return !err;
69114 }
69115@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
69116 goto out;
69117
69118 task_lock(task);
69119- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
69120+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
69121 task_unlock(task);
69122 if (retval)
69123 goto unlock_creds;
69124@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
69125 goto unlock_tasklist;
69126
69127 task->ptrace = PT_PTRACED;
69128- if (capable(CAP_SYS_PTRACE))
69129+ if (capable_nolog(CAP_SYS_PTRACE))
69130 task->ptrace |= PT_PTRACE_CAP;
69131
69132 __ptrace_link(task, current);
69133@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
69134 {
69135 int copied = 0;
69136
69137+ pax_track_stack();
69138+
69139 while (len > 0) {
69140 char buf[128];
69141 int this_len, retval;
69142@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
69143 {
69144 int copied = 0;
69145
69146+ pax_track_stack();
69147+
69148 while (len > 0) {
69149 char buf[128];
69150 int this_len, retval;
69151@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
69152 int ret = -EIO;
69153 siginfo_t siginfo;
69154
69155+ pax_track_stack();
69156+
69157 switch (request) {
69158 case PTRACE_PEEKTEXT:
69159 case PTRACE_PEEKDATA:
69160@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
69161 ret = ptrace_setoptions(child, data);
69162 break;
69163 case PTRACE_GETEVENTMSG:
69164- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
69165+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
69166 break;
69167
69168 case PTRACE_GETSIGINFO:
69169 ret = ptrace_getsiginfo(child, &siginfo);
69170 if (!ret)
69171- ret = copy_siginfo_to_user((siginfo_t __user *) data,
69172+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
69173 &siginfo);
69174 break;
69175
69176 case PTRACE_SETSIGINFO:
69177- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
69178+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
69179 sizeof siginfo))
69180 ret = -EFAULT;
69181 else
69182@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
69183 goto out;
69184 }
69185
69186+ if (gr_handle_ptrace(child, request)) {
69187+ ret = -EPERM;
69188+ goto out_put_task_struct;
69189+ }
69190+
69191 if (request == PTRACE_ATTACH) {
69192 ret = ptrace_attach(child);
69193 /*
69194 * Some architectures need to do book-keeping after
69195 * a ptrace attach.
69196 */
69197- if (!ret)
69198+ if (!ret) {
69199 arch_ptrace_attach(child);
69200+ gr_audit_ptrace(child);
69201+ }
69202 goto out_put_task_struct;
69203 }
69204
69205@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
69206 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
69207 if (copied != sizeof(tmp))
69208 return -EIO;
69209- return put_user(tmp, (unsigned long __user *)data);
69210+ return put_user(tmp, (__force unsigned long __user *)data);
69211 }
69212
69213 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
69214@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
69215 siginfo_t siginfo;
69216 int ret;
69217
69218+ pax_track_stack();
69219+
69220 switch (request) {
69221 case PTRACE_PEEKTEXT:
69222 case PTRACE_PEEKDATA:
69223@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
69224 goto out;
69225 }
69226
69227+ if (gr_handle_ptrace(child, request)) {
69228+ ret = -EPERM;
69229+ goto out_put_task_struct;
69230+ }
69231+
69232 if (request == PTRACE_ATTACH) {
69233 ret = ptrace_attach(child);
69234 /*
69235 * Some architectures need to do book-keeping after
69236 * a ptrace attach.
69237 */
69238- if (!ret)
69239+ if (!ret) {
69240 arch_ptrace_attach(child);
69241+ gr_audit_ptrace(child);
69242+ }
69243 goto out_put_task_struct;
69244 }
69245
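
The kernel/ptrace.c hunks give __ptrace_may_access() a log flag: permission probes use capable_nolog() so failed checks stay quiet, real attach paths use capable() (which may log), and a ptrace_may_access_log() wrapper is added for the logging case. A compact sketch of that logged/silent split, using stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins: a silent capability probe vs. one that logs the denial. */
    static bool has_cap_quiet(void)  { return false; }
    static bool has_cap_logged(void) { fprintf(stderr, "capability denied\n"); return false; }

    static int may_access(int target, unsigned int mode, int log)
    {
        (void)target; (void)mode;
        if (log ? has_cap_logged() : has_cap_quiet())
            return 0;
        return -1;                              /* -EPERM in the kernel version */
    }

    bool may_access_quiet(int target, unsigned int mode)   /* cf. ptrace_may_access()     */
    {
        return may_access(target, mode, 0) == 0;
    }

    bool may_access_logged(int target, unsigned int mode)  /* cf. ptrace_may_access_log() */
    {
        return may_access(target, mode, 1) == 0;
    }
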
69246diff -urNp linux-2.6.32.48/kernel/rcutorture.c linux-2.6.32.48/kernel/rcutorture.c
69247--- linux-2.6.32.48/kernel/rcutorture.c 2011-11-08 19:02:43.000000000 -0500
69248+++ linux-2.6.32.48/kernel/rcutorture.c 2011-11-15 19:59:43.000000000 -0500
69249@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
69250 { 0 };
69251 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69252 { 0 };
69253-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69254-static atomic_t n_rcu_torture_alloc;
69255-static atomic_t n_rcu_torture_alloc_fail;
69256-static atomic_t n_rcu_torture_free;
69257-static atomic_t n_rcu_torture_mberror;
69258-static atomic_t n_rcu_torture_error;
69259+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69260+static atomic_unchecked_t n_rcu_torture_alloc;
69261+static atomic_unchecked_t n_rcu_torture_alloc_fail;
69262+static atomic_unchecked_t n_rcu_torture_free;
69263+static atomic_unchecked_t n_rcu_torture_mberror;
69264+static atomic_unchecked_t n_rcu_torture_error;
69265 static long n_rcu_torture_timers;
69266 static struct list_head rcu_torture_removed;
69267 static cpumask_var_t shuffle_tmp_mask;
69268@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
69269
69270 spin_lock_bh(&rcu_torture_lock);
69271 if (list_empty(&rcu_torture_freelist)) {
69272- atomic_inc(&n_rcu_torture_alloc_fail);
69273+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69274 spin_unlock_bh(&rcu_torture_lock);
69275 return NULL;
69276 }
69277- atomic_inc(&n_rcu_torture_alloc);
69278+ atomic_inc_unchecked(&n_rcu_torture_alloc);
69279 p = rcu_torture_freelist.next;
69280 list_del_init(p);
69281 spin_unlock_bh(&rcu_torture_lock);
69282@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
69283 static void
69284 rcu_torture_free(struct rcu_torture *p)
69285 {
69286- atomic_inc(&n_rcu_torture_free);
69287+ atomic_inc_unchecked(&n_rcu_torture_free);
69288 spin_lock_bh(&rcu_torture_lock);
69289 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69290 spin_unlock_bh(&rcu_torture_lock);
69291@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
69292 i = rp->rtort_pipe_count;
69293 if (i > RCU_TORTURE_PIPE_LEN)
69294 i = RCU_TORTURE_PIPE_LEN;
69295- atomic_inc(&rcu_torture_wcount[i]);
69296+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69297 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69298 rp->rtort_mbtest = 0;
69299 rcu_torture_free(rp);
69300@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
69301 i = rp->rtort_pipe_count;
69302 if (i > RCU_TORTURE_PIPE_LEN)
69303 i = RCU_TORTURE_PIPE_LEN;
69304- atomic_inc(&rcu_torture_wcount[i]);
69305+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69306 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69307 rp->rtort_mbtest = 0;
69308 list_del(&rp->rtort_free);
69309@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
69310 i = old_rp->rtort_pipe_count;
69311 if (i > RCU_TORTURE_PIPE_LEN)
69312 i = RCU_TORTURE_PIPE_LEN;
69313- atomic_inc(&rcu_torture_wcount[i]);
69314+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69315 old_rp->rtort_pipe_count++;
69316 cur_ops->deferred_free(old_rp);
69317 }
69318@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
69319 return;
69320 }
69321 if (p->rtort_mbtest == 0)
69322- atomic_inc(&n_rcu_torture_mberror);
69323+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69324 spin_lock(&rand_lock);
69325 cur_ops->read_delay(&rand);
69326 n_rcu_torture_timers++;
69327@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
69328 continue;
69329 }
69330 if (p->rtort_mbtest == 0)
69331- atomic_inc(&n_rcu_torture_mberror);
69332+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69333 cur_ops->read_delay(&rand);
69334 preempt_disable();
69335 pipe_count = p->rtort_pipe_count;
69336@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
69337 rcu_torture_current,
69338 rcu_torture_current_version,
69339 list_empty(&rcu_torture_freelist),
69340- atomic_read(&n_rcu_torture_alloc),
69341- atomic_read(&n_rcu_torture_alloc_fail),
69342- atomic_read(&n_rcu_torture_free),
69343- atomic_read(&n_rcu_torture_mberror),
69344+ atomic_read_unchecked(&n_rcu_torture_alloc),
69345+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69346+ atomic_read_unchecked(&n_rcu_torture_free),
69347+ atomic_read_unchecked(&n_rcu_torture_mberror),
69348 n_rcu_torture_timers);
69349- if (atomic_read(&n_rcu_torture_mberror) != 0)
69350+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
69351 cnt += sprintf(&page[cnt], " !!!");
69352 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69353 if (i > 1) {
69354 cnt += sprintf(&page[cnt], "!!! ");
69355- atomic_inc(&n_rcu_torture_error);
69356+ atomic_inc_unchecked(&n_rcu_torture_error);
69357 WARN_ON_ONCE(1);
69358 }
69359 cnt += sprintf(&page[cnt], "Reader Pipe: ");
69360@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
69361 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69362 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69363 cnt += sprintf(&page[cnt], " %d",
69364- atomic_read(&rcu_torture_wcount[i]));
69365+ atomic_read_unchecked(&rcu_torture_wcount[i]));
69366 }
69367 cnt += sprintf(&page[cnt], "\n");
69368 if (cur_ops->stats)
69369@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
69370
69371 if (cur_ops->cleanup)
69372 cur_ops->cleanup();
69373- if (atomic_read(&n_rcu_torture_error))
69374+ if (atomic_read_unchecked(&n_rcu_torture_error))
69375 rcu_torture_print_module_parms("End of test: FAILURE");
69376 else
69377 rcu_torture_print_module_parms("End of test: SUCCESS");
69378@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
69379
69380 rcu_torture_current = NULL;
69381 rcu_torture_current_version = 0;
69382- atomic_set(&n_rcu_torture_alloc, 0);
69383- atomic_set(&n_rcu_torture_alloc_fail, 0);
69384- atomic_set(&n_rcu_torture_free, 0);
69385- atomic_set(&n_rcu_torture_mberror, 0);
69386- atomic_set(&n_rcu_torture_error, 0);
69387+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69388+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69389+ atomic_set_unchecked(&n_rcu_torture_free, 0);
69390+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69391+ atomic_set_unchecked(&n_rcu_torture_error, 0);
69392 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69393- atomic_set(&rcu_torture_wcount[i], 0);
69394+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69395 for_each_possible_cpu(cpu) {
69396 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69397 per_cpu(rcu_torture_count, cpu)[i] = 0;
69398diff -urNp linux-2.6.32.48/kernel/rcutree.c linux-2.6.32.48/kernel/rcutree.c
69399--- linux-2.6.32.48/kernel/rcutree.c 2011-11-08 19:02:43.000000000 -0500
69400+++ linux-2.6.32.48/kernel/rcutree.c 2011-11-15 19:59:43.000000000 -0500
69401@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
69402 /*
69403 * Do softirq processing for the current CPU.
69404 */
69405-static void rcu_process_callbacks(struct softirq_action *unused)
69406+static void rcu_process_callbacks(void)
69407 {
69408 /*
69409 * Memory references from any prior RCU read-side critical sections
69410diff -urNp linux-2.6.32.48/kernel/rcutree_plugin.h linux-2.6.32.48/kernel/rcutree_plugin.h
69411--- linux-2.6.32.48/kernel/rcutree_plugin.h 2011-11-08 19:02:43.000000000 -0500
69412+++ linux-2.6.32.48/kernel/rcutree_plugin.h 2011-11-15 19:59:43.000000000 -0500
69413@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
69414 */
69415 void __rcu_read_lock(void)
69416 {
69417- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
69418+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
69419 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
69420 }
69421 EXPORT_SYMBOL_GPL(__rcu_read_lock);
69422@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
69423 struct task_struct *t = current;
69424
69425 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
69426- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
69427+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
69428 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
69429 rcu_read_unlock_special(t);
69430 }
69431diff -urNp linux-2.6.32.48/kernel/relay.c linux-2.6.32.48/kernel/relay.c
69432--- linux-2.6.32.48/kernel/relay.c 2011-11-08 19:02:43.000000000 -0500
69433+++ linux-2.6.32.48/kernel/relay.c 2011-11-15 19:59:43.000000000 -0500
69434@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
69435 unsigned int flags,
69436 int *nonpad_ret)
69437 {
69438- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
69439+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
69440 struct rchan_buf *rbuf = in->private_data;
69441 unsigned int subbuf_size = rbuf->chan->subbuf_size;
69442 uint64_t pos = (uint64_t) *ppos;
69443@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
69444 .ops = &relay_pipe_buf_ops,
69445 .spd_release = relay_page_release,
69446 };
69447+ ssize_t ret;
69448+
69449+ pax_track_stack();
69450
69451 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
69452 return 0;
69453diff -urNp linux-2.6.32.48/kernel/resource.c linux-2.6.32.48/kernel/resource.c
69454--- linux-2.6.32.48/kernel/resource.c 2011-11-08 19:02:43.000000000 -0500
69455+++ linux-2.6.32.48/kernel/resource.c 2011-11-15 19:59:43.000000000 -0500
69456@@ -132,8 +132,18 @@ static const struct file_operations proc
69457
69458 static int __init ioresources_init(void)
69459 {
69460+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69461+#ifdef CONFIG_GRKERNSEC_PROC_USER
69462+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69463+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69464+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69465+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69466+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69467+#endif
69468+#else
69469 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69470 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69471+#endif
69472 return 0;
69473 }
69474 __initcall(ioresources_init);
69475diff -urNp linux-2.6.32.48/kernel/rtmutex.c linux-2.6.32.48/kernel/rtmutex.c
69476--- linux-2.6.32.48/kernel/rtmutex.c 2011-11-08 19:02:43.000000000 -0500
69477+++ linux-2.6.32.48/kernel/rtmutex.c 2011-11-15 19:59:43.000000000 -0500
69478@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
69479 */
69480 spin_lock_irqsave(&pendowner->pi_lock, flags);
69481
69482- WARN_ON(!pendowner->pi_blocked_on);
69483+ BUG_ON(!pendowner->pi_blocked_on);
69484 WARN_ON(pendowner->pi_blocked_on != waiter);
69485 WARN_ON(pendowner->pi_blocked_on->lock != lock);
69486
69487diff -urNp linux-2.6.32.48/kernel/rtmutex-tester.c linux-2.6.32.48/kernel/rtmutex-tester.c
69488--- linux-2.6.32.48/kernel/rtmutex-tester.c 2011-11-08 19:02:43.000000000 -0500
69489+++ linux-2.6.32.48/kernel/rtmutex-tester.c 2011-11-15 19:59:43.000000000 -0500
69490@@ -21,7 +21,7 @@
69491 #define MAX_RT_TEST_MUTEXES 8
69492
69493 static spinlock_t rttest_lock;
69494-static atomic_t rttest_event;
69495+static atomic_unchecked_t rttest_event;
69496
69497 struct test_thread_data {
69498 int opcode;
69499@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
69500
69501 case RTTEST_LOCKCONT:
69502 td->mutexes[td->opdata] = 1;
69503- td->event = atomic_add_return(1, &rttest_event);
69504+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69505 return 0;
69506
69507 case RTTEST_RESET:
69508@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
69509 return 0;
69510
69511 case RTTEST_RESETEVENT:
69512- atomic_set(&rttest_event, 0);
69513+ atomic_set_unchecked(&rttest_event, 0);
69514 return 0;
69515
69516 default:
69517@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
69518 return ret;
69519
69520 td->mutexes[id] = 1;
69521- td->event = atomic_add_return(1, &rttest_event);
69522+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69523 rt_mutex_lock(&mutexes[id]);
69524- td->event = atomic_add_return(1, &rttest_event);
69525+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69526 td->mutexes[id] = 4;
69527 return 0;
69528
69529@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
69530 return ret;
69531
69532 td->mutexes[id] = 1;
69533- td->event = atomic_add_return(1, &rttest_event);
69534+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69535 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69536- td->event = atomic_add_return(1, &rttest_event);
69537+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69538 td->mutexes[id] = ret ? 0 : 4;
69539 return ret ? -EINTR : 0;
69540
69541@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
69542 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69543 return ret;
69544
69545- td->event = atomic_add_return(1, &rttest_event);
69546+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69547 rt_mutex_unlock(&mutexes[id]);
69548- td->event = atomic_add_return(1, &rttest_event);
69549+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69550 td->mutexes[id] = 0;
69551 return 0;
69552
69553@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
69554 break;
69555
69556 td->mutexes[dat] = 2;
69557- td->event = atomic_add_return(1, &rttest_event);
69558+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69559 break;
69560
69561 case RTTEST_LOCKBKL:
69562@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
69563 return;
69564
69565 td->mutexes[dat] = 3;
69566- td->event = atomic_add_return(1, &rttest_event);
69567+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69568 break;
69569
69570 case RTTEST_LOCKNOWAIT:
69571@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
69572 return;
69573
69574 td->mutexes[dat] = 1;
69575- td->event = atomic_add_return(1, &rttest_event);
69576+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69577 return;
69578
69579 case RTTEST_LOCKBKL:
69580diff -urNp linux-2.6.32.48/kernel/sched.c linux-2.6.32.48/kernel/sched.c
69581--- linux-2.6.32.48/kernel/sched.c 2011-11-08 19:02:43.000000000 -0500
69582+++ linux-2.6.32.48/kernel/sched.c 2011-11-15 19:59:43.000000000 -0500
69583@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
69584 {
69585 unsigned long flags;
69586 struct rq *rq;
69587- int cpu = get_cpu();
69588
69589 #ifdef CONFIG_SMP
69590+ int cpu = get_cpu();
69591+
69592 rq = task_rq_lock(p, &flags);
69593 p->state = TASK_WAKING;
69594
69595@@ -5043,7 +5044,7 @@ out:
69596 * In CONFIG_NO_HZ case, the idle load balance owner will do the
69597 * rebalancing for all the cpus for whom scheduler ticks are stopped.
69598 */
69599-static void run_rebalance_domains(struct softirq_action *h)
69600+static void run_rebalance_domains(void)
69601 {
69602 int this_cpu = smp_processor_id();
69603 struct rq *this_rq = cpu_rq(this_cpu);
69604@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
69605 struct rq *rq;
69606 int cpu;
69607
69608+ pax_track_stack();
69609+
69610 need_resched:
69611 preempt_disable();
69612 cpu = smp_processor_id();
69613@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
69614 * Look out! "owner" is an entirely speculative pointer
69615 * access and not reliable.
69616 */
69617-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
69618+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
69619 {
69620 unsigned int cpu;
69621 struct rq *rq;
69622@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
69623 * DEBUG_PAGEALLOC could have unmapped it if
69624 * the mutex owner just released it and exited.
69625 */
69626- if (probe_kernel_address(&owner->cpu, cpu))
69627+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
69628 return 0;
69629 #else
69630- cpu = owner->cpu;
69631+ cpu = task_thread_info(owner)->cpu;
69632 #endif
69633
69634 /*
69635@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
69636 /*
69637 * Is that owner really running on that cpu?
69638 */
69639- if (task_thread_info(rq->curr) != owner || need_resched())
69640+ if (rq->curr != owner || need_resched())
69641 return 0;
69642
69643 cpu_relax();
69644@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
69645 /* convert nice value [19,-20] to rlimit style value [1,40] */
69646 int nice_rlim = 20 - nice;
69647
69648+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69649+
69650 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
69651 capable(CAP_SYS_NICE));
69652 }
69653@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69654 if (nice > 19)
69655 nice = 19;
69656
69657- if (increment < 0 && !can_nice(current, nice))
69658+ if (increment < 0 && (!can_nice(current, nice) ||
69659+ gr_handle_chroot_nice()))
69660 return -EPERM;
69661
69662 retval = security_task_setnice(current, nice);
69663@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
69664 long power;
69665 int weight;
69666
69667- WARN_ON(!sd || !sd->groups);
69668+ BUG_ON(!sd || !sd->groups);
69669
69670 if (cpu != group_first_cpu(sd->groups))
69671 return;
69672diff -urNp linux-2.6.32.48/kernel/signal.c linux-2.6.32.48/kernel/signal.c
69673--- linux-2.6.32.48/kernel/signal.c 2011-11-08 19:02:43.000000000 -0500
69674+++ linux-2.6.32.48/kernel/signal.c 2011-11-15 19:59:43.000000000 -0500
69675@@ -41,12 +41,12 @@
69676
69677 static struct kmem_cache *sigqueue_cachep;
69678
69679-static void __user *sig_handler(struct task_struct *t, int sig)
69680+static __sighandler_t sig_handler(struct task_struct *t, int sig)
69681 {
69682 return t->sighand->action[sig - 1].sa.sa_handler;
69683 }
69684
69685-static int sig_handler_ignored(void __user *handler, int sig)
69686+static int sig_handler_ignored(__sighandler_t handler, int sig)
69687 {
69688 /* Is it explicitly or implicitly ignored? */
69689 return handler == SIG_IGN ||
69690@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
69691 static int sig_task_ignored(struct task_struct *t, int sig,
69692 int from_ancestor_ns)
69693 {
69694- void __user *handler;
69695+ __sighandler_t handler;
69696
69697 handler = sig_handler(t, sig);
69698
69699@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
69700 */
69701 user = get_uid(__task_cred(t)->user);
69702 atomic_inc(&user->sigpending);
69703+
69704+ if (!override_rlimit)
69705+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
69706 if (override_rlimit ||
69707 atomic_read(&user->sigpending) <=
69708 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
69709@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
69710
69711 int unhandled_signal(struct task_struct *tsk, int sig)
69712 {
69713- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69714+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69715 if (is_global_init(tsk))
69716 return 1;
69717 if (handler != SIG_IGN && handler != SIG_DFL)
69718@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
69719 }
69720 }
69721
69722+ /* allow glibc communication via tgkill to other threads in our
69723+ thread group */
69724+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69725+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69726+ && gr_handle_signal(t, sig))
69727+ return -EPERM;
69728+
69729 return security_task_kill(t, info, sig, 0);
69730 }
69731
69732@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
69733 return send_signal(sig, info, p, 1);
69734 }
69735
69736-static int
69737+int
69738 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69739 {
69740 return send_signal(sig, info, t, 0);
69741@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
69742 unsigned long int flags;
69743 int ret, blocked, ignored;
69744 struct k_sigaction *action;
69745+ int is_unhandled = 0;
69746
69747 spin_lock_irqsave(&t->sighand->siglock, flags);
69748 action = &t->sighand->action[sig-1];
69749@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
69750 }
69751 if (action->sa.sa_handler == SIG_DFL)
69752 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69753+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69754+ is_unhandled = 1;
69755 ret = specific_send_sig_info(sig, info, t);
69756 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69757
69758+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
69759+ normal operation */
69760+ if (is_unhandled) {
69761+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69762+ gr_handle_crash(t, sig);
69763+ }
69764+
69765 return ret;
69766 }
69767
69768@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
69769 {
69770 int ret = check_kill_permission(sig, info, p);
69771
69772- if (!ret && sig)
69773+ if (!ret && sig) {
69774 ret = do_send_sig_info(sig, info, p, true);
69775+ if (!ret)
69776+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
69777+ }
69778
69779 return ret;
69780 }
69781@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
69782 {
69783 siginfo_t info;
69784
69785+ pax_track_stack();
69786+
69787 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
69788
69789 memset(&info, 0, sizeof info);
69790@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
69791 int error = -ESRCH;
69792
69793 rcu_read_lock();
69794- p = find_task_by_vpid(pid);
69795+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69796+ /* allow glibc communication via tgkill to other threads in our
69797+ thread group */
69798+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69799+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
69800+ p = find_task_by_vpid_unrestricted(pid);
69801+ else
69802+#endif
69803+ p = find_task_by_vpid(pid);
69804 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69805 error = check_kill_permission(sig, info, p);
69806 /*
69807diff -urNp linux-2.6.32.48/kernel/smp.c linux-2.6.32.48/kernel/smp.c
69808--- linux-2.6.32.48/kernel/smp.c 2011-11-08 19:02:43.000000000 -0500
69809+++ linux-2.6.32.48/kernel/smp.c 2011-11-15 19:59:43.000000000 -0500
69810@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
69811 }
69812 EXPORT_SYMBOL(smp_call_function);
69813
69814-void ipi_call_lock(void)
69815+void ipi_call_lock(void) __acquires(call_function.lock)
69816 {
69817 spin_lock(&call_function.lock);
69818 }
69819
69820-void ipi_call_unlock(void)
69821+void ipi_call_unlock(void) __releases(call_function.lock)
69822 {
69823 spin_unlock(&call_function.lock);
69824 }
69825
69826-void ipi_call_lock_irq(void)
69827+void ipi_call_lock_irq(void) __acquires(call_function.lock)
69828 {
69829 spin_lock_irq(&call_function.lock);
69830 }
69831
69832-void ipi_call_unlock_irq(void)
69833+void ipi_call_unlock_irq(void) __releases(call_function.lock)
69834 {
69835 spin_unlock_irq(&call_function.lock);
69836 }
69837diff -urNp linux-2.6.32.48/kernel/softirq.c linux-2.6.32.48/kernel/softirq.c
69838--- linux-2.6.32.48/kernel/softirq.c 2011-11-08 19:02:43.000000000 -0500
69839+++ linux-2.6.32.48/kernel/softirq.c 2011-11-15 19:59:43.000000000 -0500
69840@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
69841
69842 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
69843
69844-char *softirq_to_name[NR_SOFTIRQS] = {
69845+const char * const softirq_to_name[NR_SOFTIRQS] = {
69846 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
69847 "TASKLET", "SCHED", "HRTIMER", "RCU"
69848 };
69849@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
69850
69851 asmlinkage void __do_softirq(void)
69852 {
69853- struct softirq_action *h;
69854+ const struct softirq_action *h;
69855 __u32 pending;
69856 int max_restart = MAX_SOFTIRQ_RESTART;
69857 int cpu;
69858@@ -233,7 +233,7 @@ restart:
69859 kstat_incr_softirqs_this_cpu(h - softirq_vec);
69860
69861 trace_softirq_entry(h, softirq_vec);
69862- h->action(h);
69863+ h->action();
69864 trace_softirq_exit(h, softirq_vec);
69865 if (unlikely(prev_count != preempt_count())) {
69866 printk(KERN_ERR "huh, entered softirq %td %s %p"
69867@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
69868 local_irq_restore(flags);
69869 }
69870
69871-void open_softirq(int nr, void (*action)(struct softirq_action *))
69872+void open_softirq(int nr, void (*action)(void))
69873 {
69874- softirq_vec[nr].action = action;
69875+ pax_open_kernel();
69876+ *(void **)&softirq_vec[nr].action = action;
69877+ pax_close_kernel();
69878 }
69879
69880 /*
69881@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
69882
69883 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
69884
69885-static void tasklet_action(struct softirq_action *a)
69886+static void tasklet_action(void)
69887 {
69888 struct tasklet_struct *list;
69889
69890@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
69891 }
69892 }
69893
69894-static void tasklet_hi_action(struct softirq_action *a)
69895+static void tasklet_hi_action(void)
69896 {
69897 struct tasklet_struct *list;
69898
69899diff -urNp linux-2.6.32.48/kernel/sys.c linux-2.6.32.48/kernel/sys.c
69900--- linux-2.6.32.48/kernel/sys.c 2011-11-08 19:02:43.000000000 -0500
69901+++ linux-2.6.32.48/kernel/sys.c 2011-11-15 19:59:43.000000000 -0500
69902@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
69903 error = -EACCES;
69904 goto out;
69905 }
69906+
69907+ if (gr_handle_chroot_setpriority(p, niceval)) {
69908+ error = -EACCES;
69909+ goto out;
69910+ }
69911+
69912 no_nice = security_task_setnice(p, niceval);
69913 if (no_nice) {
69914 error = no_nice;
69915@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
69916 !(user = find_user(who)))
69917 goto out_unlock; /* No processes for this user */
69918
69919- do_each_thread(g, p)
69920+ do_each_thread(g, p) {
69921 if (__task_cred(p)->uid == who)
69922 error = set_one_prio(p, niceval, error);
69923- while_each_thread(g, p);
69924+ } while_each_thread(g, p);
69925 if (who != cred->uid)
69926 free_uid(user); /* For find_user() */
69927 break;
69928@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
69929 !(user = find_user(who)))
69930 goto out_unlock; /* No processes for this user */
69931
69932- do_each_thread(g, p)
69933+ do_each_thread(g, p) {
69934 if (__task_cred(p)->uid == who) {
69935 niceval = 20 - task_nice(p);
69936 if (niceval > retval)
69937 retval = niceval;
69938 }
69939- while_each_thread(g, p);
69940+ } while_each_thread(g, p);
69941 if (who != cred->uid)
69942 free_uid(user); /* for find_user() */
69943 break;
69944@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
69945 goto error;
69946 }
69947
69948+ if (gr_check_group_change(new->gid, new->egid, -1))
69949+ goto error;
69950+
69951 if (rgid != (gid_t) -1 ||
69952 (egid != (gid_t) -1 && egid != old->gid))
69953 new->sgid = new->egid;
69954@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
69955 goto error;
69956
69957 retval = -EPERM;
69958+
69959+ if (gr_check_group_change(gid, gid, gid))
69960+ goto error;
69961+
69962 if (capable(CAP_SETGID))
69963 new->gid = new->egid = new->sgid = new->fsgid = gid;
69964 else if (gid == old->gid || gid == old->sgid)
69965@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
69966 if (!new_user)
69967 return -EAGAIN;
69968
69969+ /*
69970+ * We don't fail in case of NPROC limit excess here because too many
69971+ * poorly written programs don't check set*uid() return code, assuming
69972+ * it never fails if called by root. We may still enforce NPROC limit
69973+ * for programs doing set*uid()+execve() by harmlessly deferring the
69974+ * failure to the execve() stage.
69975+ */
69976 if (atomic_read(&new_user->processes) >=
69977 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
69978- new_user != INIT_USER) {
69979- free_uid(new_user);
69980- return -EAGAIN;
69981- }
69982+ new_user != INIT_USER)
69983+ current->flags |= PF_NPROC_EXCEEDED;
69984+ else
69985+ current->flags &= ~PF_NPROC_EXCEEDED;
69986
69987 free_uid(new->user);
69988 new->user = new_user;
69989@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
69990 goto error;
69991 }
69992
69993+ if (gr_check_user_change(new->uid, new->euid, -1))
69994+ goto error;
69995+
69996 if (new->uid != old->uid) {
69997 retval = set_user(new);
69998 if (retval < 0)
69999@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
70000 goto error;
70001
70002 retval = -EPERM;
70003+
70004+ if (gr_check_crash_uid(uid))
70005+ goto error;
70006+ if (gr_check_user_change(uid, uid, uid))
70007+ goto error;
70008+
70009 if (capable(CAP_SETUID)) {
70010 new->suid = new->uid = uid;
70011 if (uid != old->uid) {
70012@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
70013 goto error;
70014 }
70015
70016+ if (gr_check_user_change(ruid, euid, -1))
70017+ goto error;
70018+
70019 if (ruid != (uid_t) -1) {
70020 new->uid = ruid;
70021 if (ruid != old->uid) {
70022@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
70023 goto error;
70024 }
70025
70026+ if (gr_check_group_change(rgid, egid, -1))
70027+ goto error;
70028+
70029 if (rgid != (gid_t) -1)
70030 new->gid = rgid;
70031 if (egid != (gid_t) -1)
70032@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70033 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
70034 goto error;
70035
70036+ if (gr_check_user_change(-1, -1, uid))
70037+ goto error;
70038+
70039 if (uid == old->uid || uid == old->euid ||
70040 uid == old->suid || uid == old->fsuid ||
70041 capable(CAP_SETUID)) {
70042@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
70043 if (gid == old->gid || gid == old->egid ||
70044 gid == old->sgid || gid == old->fsgid ||
70045 capable(CAP_SETGID)) {
70046+ if (gr_check_group_change(-1, -1, gid))
70047+ goto error;
70048+
70049 if (gid != old_fsgid) {
70050 new->fsgid = gid;
70051 goto change_okay;
70052@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
70053 error = get_dumpable(me->mm);
70054 break;
70055 case PR_SET_DUMPABLE:
70056- if (arg2 < 0 || arg2 > 1) {
70057+ if (arg2 > 1) {
70058 error = -EINVAL;
70059 break;
70060 }
70061diff -urNp linux-2.6.32.48/kernel/sysctl.c linux-2.6.32.48/kernel/sysctl.c
70062--- linux-2.6.32.48/kernel/sysctl.c 2011-11-08 19:02:43.000000000 -0500
70063+++ linux-2.6.32.48/kernel/sysctl.c 2011-11-15 19:59:43.000000000 -0500
70064@@ -63,6 +63,13 @@
70065 static int deprecated_sysctl_warning(struct __sysctl_args *args);
70066
70067 #if defined(CONFIG_SYSCTL)
70068+#include <linux/grsecurity.h>
70069+#include <linux/grinternal.h>
70070+
70071+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
70072+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
70073+ const int op);
70074+extern int gr_handle_chroot_sysctl(const int op);
70075
70076 /* External variables not in a header file. */
70077 extern int C_A_D;
70078@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
70079 static int proc_taint(struct ctl_table *table, int write,
70080 void __user *buffer, size_t *lenp, loff_t *ppos);
70081 #endif
70082+extern ctl_table grsecurity_table[];
70083
70084 static struct ctl_table root_table[];
70085 static struct ctl_table_root sysctl_table_root;
70086@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
70087 int sysctl_legacy_va_layout;
70088 #endif
70089
70090+#ifdef CONFIG_PAX_SOFTMODE
70091+static ctl_table pax_table[] = {
70092+ {
70093+ .ctl_name = CTL_UNNUMBERED,
70094+ .procname = "softmode",
70095+ .data = &pax_softmode,
70096+ .maxlen = sizeof(unsigned int),
70097+ .mode = 0600,
70098+ .proc_handler = &proc_dointvec,
70099+ },
70100+
70101+ { .ctl_name = 0 }
70102+};
70103+#endif
70104+
70105 extern int prove_locking;
70106 extern int lock_stat;
70107
70108@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
70109 #endif
70110
70111 static struct ctl_table kern_table[] = {
70112+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70113+ {
70114+ .ctl_name = CTL_UNNUMBERED,
70115+ .procname = "grsecurity",
70116+ .mode = 0500,
70117+ .child = grsecurity_table,
70118+ },
70119+#endif
70120+
70121+#ifdef CONFIG_PAX_SOFTMODE
70122+ {
70123+ .ctl_name = CTL_UNNUMBERED,
70124+ .procname = "pax",
70125+ .mode = 0500,
70126+ .child = pax_table,
70127+ },
70128+#endif
70129+
70130 {
70131 .ctl_name = CTL_UNNUMBERED,
70132 .procname = "sched_child_runs_first",
70133@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
70134 .data = &modprobe_path,
70135 .maxlen = KMOD_PATH_LEN,
70136 .mode = 0644,
70137- .proc_handler = &proc_dostring,
70138- .strategy = &sysctl_string,
70139+ .proc_handler = &proc_dostring_modpriv,
70140+ .strategy = &sysctl_string_modpriv,
70141 },
70142 {
70143 .ctl_name = CTL_UNNUMBERED,
70144@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
70145 .mode = 0644,
70146 .proc_handler = &proc_dointvec
70147 },
70148+ {
70149+ .procname = "heap_stack_gap",
70150+ .data = &sysctl_heap_stack_gap,
70151+ .maxlen = sizeof(sysctl_heap_stack_gap),
70152+ .mode = 0644,
70153+ .proc_handler = proc_doulongvec_minmax,
70154+ },
70155 #else
70156 {
70157 .ctl_name = CTL_UNNUMBERED,
70158@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
70159 return 0;
70160 }
70161
70162+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
70163+
70164 static int parse_table(int __user *name, int nlen,
70165 void __user *oldval, size_t __user *oldlenp,
70166 void __user *newval, size_t newlen,
70167@@ -1821,7 +1871,7 @@ repeat:
70168 if (n == table->ctl_name) {
70169 int error;
70170 if (table->child) {
70171- if (sysctl_perm(root, table, MAY_EXEC))
70172+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
70173 return -EPERM;
70174 name++;
70175 nlen--;
70176@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
70177 int error;
70178 int mode;
70179
70180+ if (table->parent != NULL && table->parent->procname != NULL &&
70181+ table->procname != NULL &&
70182+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
70183+ return -EACCES;
70184+ if (gr_handle_chroot_sysctl(op))
70185+ return -EACCES;
70186+ error = gr_handle_sysctl(table, op);
70187+ if (error)
70188+ return error;
70189+
70190+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
70191+ if (error)
70192+ return error;
70193+
70194+ if (root->permissions)
70195+ mode = root->permissions(root, current->nsproxy, table);
70196+ else
70197+ mode = table->mode;
70198+
70199+ return test_perm(mode, op);
70200+}
70201+
70202+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
70203+{
70204+ int error;
70205+ int mode;
70206+
70207 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
70208 if (error)
70209 return error;
70210@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
70211 buffer, lenp, ppos);
70212 }
70213
70214+int proc_dostring_modpriv(struct ctl_table *table, int write,
70215+ void __user *buffer, size_t *lenp, loff_t *ppos)
70216+{
70217+ if (write && !capable(CAP_SYS_MODULE))
70218+ return -EPERM;
70219+
70220+ return _proc_do_string(table->data, table->maxlen, write,
70221+ buffer, lenp, ppos);
70222+}
70223+
70224
70225 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
70226 int *valp,
70227@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
70228 vleft = table->maxlen / sizeof(unsigned long);
70229 left = *lenp;
70230
70231- for (; left && vleft--; i++, min++, max++, first=0) {
70232+ for (; left && vleft--; i++, first=0) {
70233 if (write) {
70234 while (left) {
70235 char c;
70236@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
70237 return -ENOSYS;
70238 }
70239
70240+int proc_dostring_modpriv(struct ctl_table *table, int write,
70241+ void __user *buffer, size_t *lenp, loff_t *ppos)
70242+{
70243+ return -ENOSYS;
70244+}
70245+
70246 int proc_dointvec(struct ctl_table *table, int write,
70247 void __user *buffer, size_t *lenp, loff_t *ppos)
70248 {
70249@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
70250 return 1;
70251 }
70252
70253+int sysctl_string_modpriv(struct ctl_table *table,
70254+ void __user *oldval, size_t __user *oldlenp,
70255+ void __user *newval, size_t newlen)
70256+{
70257+ if (newval && newlen && !capable(CAP_SYS_MODULE))
70258+ return -EPERM;
70259+
70260+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
70261+}
70262+
70263 /*
70264 * This function makes sure that all of the integers in the vector
70265 * are between the minimum and maximum values given in the arrays
70266@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
70267 return -ENOSYS;
70268 }
70269
70270+int sysctl_string_modpriv(struct ctl_table *table,
70271+ void __user *oldval, size_t __user *oldlenp,
70272+ void __user *newval, size_t newlen)
70273+{
70274+ return -ENOSYS;
70275+}
70276+
70277 int sysctl_intvec(struct ctl_table *table,
70278 void __user *oldval, size_t __user *oldlenp,
70279 void __user *newval, size_t newlen)
70280@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
70281 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70282 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70283 EXPORT_SYMBOL(proc_dostring);
70284+EXPORT_SYMBOL(proc_dostring_modpriv);
70285 EXPORT_SYMBOL(proc_doulongvec_minmax);
70286 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70287 EXPORT_SYMBOL(register_sysctl_table);
70288@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
70289 EXPORT_SYMBOL(sysctl_jiffies);
70290 EXPORT_SYMBOL(sysctl_ms_jiffies);
70291 EXPORT_SYMBOL(sysctl_string);
70292+EXPORT_SYMBOL(sysctl_string_modpriv);
70293 EXPORT_SYMBOL(sysctl_data);
70294 EXPORT_SYMBOL(unregister_sysctl_table);
70295diff -urNp linux-2.6.32.48/kernel/sysctl_check.c linux-2.6.32.48/kernel/sysctl_check.c
70296--- linux-2.6.32.48/kernel/sysctl_check.c 2011-11-08 19:02:43.000000000 -0500
70297+++ linux-2.6.32.48/kernel/sysctl_check.c 2011-11-15 19:59:43.000000000 -0500
70298@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
70299 } else {
70300 if ((table->strategy == sysctl_data) ||
70301 (table->strategy == sysctl_string) ||
70302+ (table->strategy == sysctl_string_modpriv) ||
70303 (table->strategy == sysctl_intvec) ||
70304 (table->strategy == sysctl_jiffies) ||
70305 (table->strategy == sysctl_ms_jiffies) ||
70306 (table->proc_handler == proc_dostring) ||
70307+ (table->proc_handler == proc_dostring_modpriv) ||
70308 (table->proc_handler == proc_dointvec) ||
70309 (table->proc_handler == proc_dointvec_minmax) ||
70310 (table->proc_handler == proc_dointvec_jiffies) ||
70311diff -urNp linux-2.6.32.48/kernel/taskstats.c linux-2.6.32.48/kernel/taskstats.c
70312--- linux-2.6.32.48/kernel/taskstats.c 2011-11-08 19:02:43.000000000 -0500
70313+++ linux-2.6.32.48/kernel/taskstats.c 2011-11-15 19:59:43.000000000 -0500
70314@@ -26,9 +26,12 @@
70315 #include <linux/cgroup.h>
70316 #include <linux/fs.h>
70317 #include <linux/file.h>
70318+#include <linux/grsecurity.h>
70319 #include <net/genetlink.h>
70320 #include <asm/atomic.h>
70321
70322+extern int gr_is_taskstats_denied(int pid);
70323+
70324 /*
70325 * Maximum length of a cpumask that can be specified in
70326 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70327@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
70328 size_t size;
70329 cpumask_var_t mask;
70330
70331+ if (gr_is_taskstats_denied(current->pid))
70332+ return -EACCES;
70333+
70334 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
70335 return -ENOMEM;
70336
70337diff -urNp linux-2.6.32.48/kernel/time/tick-broadcast.c linux-2.6.32.48/kernel/time/tick-broadcast.c
70338--- linux-2.6.32.48/kernel/time/tick-broadcast.c 2011-11-08 19:02:43.000000000 -0500
70339+++ linux-2.6.32.48/kernel/time/tick-broadcast.c 2011-11-15 19:59:43.000000000 -0500
70340@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
70341 * then clear the broadcast bit.
70342 */
70343 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70344- int cpu = smp_processor_id();
70345+ cpu = smp_processor_id();
70346
70347 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70348 tick_broadcast_clear_oneshot(cpu);
70349diff -urNp linux-2.6.32.48/kernel/time/timekeeping.c linux-2.6.32.48/kernel/time/timekeeping.c
70350--- linux-2.6.32.48/kernel/time/timekeeping.c 2011-11-08 19:02:43.000000000 -0500
70351+++ linux-2.6.32.48/kernel/time/timekeeping.c 2011-11-15 19:59:43.000000000 -0500
70352@@ -14,6 +14,7 @@
70353 #include <linux/init.h>
70354 #include <linux/mm.h>
70355 #include <linux/sched.h>
70356+#include <linux/grsecurity.h>
70357 #include <linux/sysdev.h>
70358 #include <linux/clocksource.h>
70359 #include <linux/jiffies.h>
70360@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
70361 */
70362 struct timespec ts = xtime;
70363 timespec_add_ns(&ts, nsec);
70364- ACCESS_ONCE(xtime_cache) = ts;
70365+ ACCESS_ONCE_RW(xtime_cache) = ts;
70366 }
70367
70368 /* must hold xtime_lock */
70369@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
70370 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70371 return -EINVAL;
70372
70373+ gr_log_timechange();
70374+
70375 write_seqlock_irqsave(&xtime_lock, flags);
70376
70377 timekeeping_forward_now();
70378diff -urNp linux-2.6.32.48/kernel/time/timer_list.c linux-2.6.32.48/kernel/time/timer_list.c
70379--- linux-2.6.32.48/kernel/time/timer_list.c 2011-11-08 19:02:43.000000000 -0500
70380+++ linux-2.6.32.48/kernel/time/timer_list.c 2011-11-15 19:59:43.000000000 -0500
70381@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
70382
70383 static void print_name_offset(struct seq_file *m, void *sym)
70384 {
70385+#ifdef CONFIG_GRKERNSEC_HIDESYM
70386+ SEQ_printf(m, "<%p>", NULL);
70387+#else
70388 char symname[KSYM_NAME_LEN];
70389
70390 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70391 SEQ_printf(m, "<%p>", sym);
70392 else
70393 SEQ_printf(m, "%s", symname);
70394+#endif
70395 }
70396
70397 static void
70398@@ -112,7 +116,11 @@ next_one:
70399 static void
70400 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70401 {
70402+#ifdef CONFIG_GRKERNSEC_HIDESYM
70403+ SEQ_printf(m, " .base: %p\n", NULL);
70404+#else
70405 SEQ_printf(m, " .base: %p\n", base);
70406+#endif
70407 SEQ_printf(m, " .index: %d\n",
70408 base->index);
70409 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70410@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
70411 {
70412 struct proc_dir_entry *pe;
70413
70414+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70415+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70416+#else
70417 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70418+#endif
70419 if (!pe)
70420 return -ENOMEM;
70421 return 0;
70422diff -urNp linux-2.6.32.48/kernel/time/timer_stats.c linux-2.6.32.48/kernel/time/timer_stats.c
70423--- linux-2.6.32.48/kernel/time/timer_stats.c 2011-11-08 19:02:43.000000000 -0500
70424+++ linux-2.6.32.48/kernel/time/timer_stats.c 2011-11-15 19:59:43.000000000 -0500
70425@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70426 static unsigned long nr_entries;
70427 static struct entry entries[MAX_ENTRIES];
70428
70429-static atomic_t overflow_count;
70430+static atomic_unchecked_t overflow_count;
70431
70432 /*
70433 * The entries are in a hash-table, for fast lookup:
70434@@ -140,7 +140,7 @@ static void reset_entries(void)
70435 nr_entries = 0;
70436 memset(entries, 0, sizeof(entries));
70437 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70438- atomic_set(&overflow_count, 0);
70439+ atomic_set_unchecked(&overflow_count, 0);
70440 }
70441
70442 static struct entry *alloc_entry(void)
70443@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
70444 if (likely(entry))
70445 entry->count++;
70446 else
70447- atomic_inc(&overflow_count);
70448+ atomic_inc_unchecked(&overflow_count);
70449
70450 out_unlock:
70451 spin_unlock_irqrestore(lock, flags);
70452@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
70453
70454 static void print_name_offset(struct seq_file *m, unsigned long addr)
70455 {
70456+#ifdef CONFIG_GRKERNSEC_HIDESYM
70457+ seq_printf(m, "<%p>", NULL);
70458+#else
70459 char symname[KSYM_NAME_LEN];
70460
70461 if (lookup_symbol_name(addr, symname) < 0)
70462 seq_printf(m, "<%p>", (void *)addr);
70463 else
70464 seq_printf(m, "%s", symname);
70465+#endif
70466 }
70467
70468 static int tstats_show(struct seq_file *m, void *v)
70469@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
70470
70471 seq_puts(m, "Timer Stats Version: v0.2\n");
70472 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70473- if (atomic_read(&overflow_count))
70474+ if (atomic_read_unchecked(&overflow_count))
70475 seq_printf(m, "Overflow: %d entries\n",
70476- atomic_read(&overflow_count));
70477+ atomic_read_unchecked(&overflow_count));
70478
70479 for (i = 0; i < nr_entries; i++) {
70480 entry = entries + i;
70481@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
70482 {
70483 struct proc_dir_entry *pe;
70484
70485+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70486+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70487+#else
70488 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70489+#endif
70490 if (!pe)
70491 return -ENOMEM;
70492 return 0;
70493diff -urNp linux-2.6.32.48/kernel/time.c linux-2.6.32.48/kernel/time.c
70494--- linux-2.6.32.48/kernel/time.c 2011-11-08 19:02:43.000000000 -0500
70495+++ linux-2.6.32.48/kernel/time.c 2011-11-15 19:59:43.000000000 -0500
70496@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
70497 return error;
70498
70499 if (tz) {
70500+ /* we log in do_settimeofday called below, so don't log twice
70501+ */
70502+ if (!tv)
70503+ gr_log_timechange();
70504+
70505 /* SMP safe, global irq locking makes it work. */
70506 sys_tz = *tz;
70507 update_vsyscall_tz();
70508@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
70509 * Avoid unnecessary multiplications/divisions in the
70510 * two most common HZ cases:
70511 */
70512-unsigned int inline jiffies_to_msecs(const unsigned long j)
70513+inline unsigned int jiffies_to_msecs(const unsigned long j)
70514 {
70515 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
70516 return (MSEC_PER_SEC / HZ) * j;
70517@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
70518 }
70519 EXPORT_SYMBOL(jiffies_to_msecs);
70520
70521-unsigned int inline jiffies_to_usecs(const unsigned long j)
70522+inline unsigned int jiffies_to_usecs(const unsigned long j)
70523 {
70524 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
70525 return (USEC_PER_SEC / HZ) * j;
70526diff -urNp linux-2.6.32.48/kernel/timer.c linux-2.6.32.48/kernel/timer.c
70527--- linux-2.6.32.48/kernel/timer.c 2011-11-08 19:02:43.000000000 -0500
70528+++ linux-2.6.32.48/kernel/timer.c 2011-11-15 19:59:43.000000000 -0500
70529@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
70530 /*
70531 * This function runs timers and the timer-tq in bottom half context.
70532 */
70533-static void run_timer_softirq(struct softirq_action *h)
70534+static void run_timer_softirq(void)
70535 {
70536 struct tvec_base *base = __get_cpu_var(tvec_bases);
70537
70538diff -urNp linux-2.6.32.48/kernel/trace/blktrace.c linux-2.6.32.48/kernel/trace/blktrace.c
70539--- linux-2.6.32.48/kernel/trace/blktrace.c 2011-11-08 19:02:43.000000000 -0500
70540+++ linux-2.6.32.48/kernel/trace/blktrace.c 2011-11-15 19:59:43.000000000 -0500
70541@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
70542 struct blk_trace *bt = filp->private_data;
70543 char buf[16];
70544
70545- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70546+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70547
70548 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70549 }
70550@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
70551 return 1;
70552
70553 bt = buf->chan->private_data;
70554- atomic_inc(&bt->dropped);
70555+ atomic_inc_unchecked(&bt->dropped);
70556 return 0;
70557 }
70558
70559@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
70560
70561 bt->dir = dir;
70562 bt->dev = dev;
70563- atomic_set(&bt->dropped, 0);
70564+ atomic_set_unchecked(&bt->dropped, 0);
70565
70566 ret = -EIO;
70567 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70568diff -urNp linux-2.6.32.48/kernel/trace/ftrace.c linux-2.6.32.48/kernel/trace/ftrace.c
70569--- linux-2.6.32.48/kernel/trace/ftrace.c 2011-11-08 19:02:43.000000000 -0500
70570+++ linux-2.6.32.48/kernel/trace/ftrace.c 2011-11-15 19:59:43.000000000 -0500
70571@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
70572
70573 ip = rec->ip;
70574
70575+ ret = ftrace_arch_code_modify_prepare();
70576+ FTRACE_WARN_ON(ret);
70577+ if (ret)
70578+ return 0;
70579+
70580 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70581+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70582 if (ret) {
70583 ftrace_bug(ret, ip);
70584 rec->flags |= FTRACE_FL_FAILED;
70585- return 0;
70586 }
70587- return 1;
70588+ return ret ? 0 : 1;
70589 }
70590
70591 /*
70592diff -urNp linux-2.6.32.48/kernel/trace/ring_buffer.c linux-2.6.32.48/kernel/trace/ring_buffer.c
70593--- linux-2.6.32.48/kernel/trace/ring_buffer.c 2011-11-08 19:02:43.000000000 -0500
70594+++ linux-2.6.32.48/kernel/trace/ring_buffer.c 2011-11-15 19:59:43.000000000 -0500
70595@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
70596 * the reader page). But if the next page is a header page,
70597 * its flags will be non zero.
70598 */
70599-static int inline
70600+static inline int
70601 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
70602 struct buffer_page *page, struct list_head *list)
70603 {
70604diff -urNp linux-2.6.32.48/kernel/trace/trace.c linux-2.6.32.48/kernel/trace/trace.c
70605--- linux-2.6.32.48/kernel/trace/trace.c 2011-11-08 19:02:43.000000000 -0500
70606+++ linux-2.6.32.48/kernel/trace/trace.c 2011-11-15 19:59:43.000000000 -0500
70607@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
70608 size_t rem;
70609 unsigned int i;
70610
70611+ pax_track_stack();
70612+
70613 /* copy the tracer to avoid using a global lock all around */
70614 mutex_lock(&trace_types_lock);
70615 if (unlikely(old_tracer != current_trace && current_trace)) {
70616@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
70617 int entries, size, i;
70618 size_t ret;
70619
70620+ pax_track_stack();
70621+
70622 if (*ppos & (PAGE_SIZE - 1)) {
70623 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
70624 return -EINVAL;
70625@@ -3816,10 +3820,9 @@ static const struct file_operations trac
70626 };
70627 #endif
70628
70629-static struct dentry *d_tracer;
70630-
70631 struct dentry *tracing_init_dentry(void)
70632 {
70633+ static struct dentry *d_tracer;
70634 static int once;
70635
70636 if (d_tracer)
70637@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
70638 return d_tracer;
70639 }
70640
70641-static struct dentry *d_percpu;
70642-
70643 struct dentry *tracing_dentry_percpu(void)
70644 {
70645+ static struct dentry *d_percpu;
70646 static int once;
70647 struct dentry *d_tracer;
70648
70649diff -urNp linux-2.6.32.48/kernel/trace/trace_events.c linux-2.6.32.48/kernel/trace/trace_events.c
70650--- linux-2.6.32.48/kernel/trace/trace_events.c 2011-11-08 19:02:43.000000000 -0500
70651+++ linux-2.6.32.48/kernel/trace/trace_events.c 2011-11-15 19:59:43.000000000 -0500
70652@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
70653 * Modules must own their file_operations to keep up with
70654 * reference counting.
70655 */
70656+
70657 struct ftrace_module_file_ops {
70658 struct list_head list;
70659 struct module *mod;
70660- struct file_operations id;
70661- struct file_operations enable;
70662- struct file_operations format;
70663- struct file_operations filter;
70664 };
70665
70666 static void remove_subsystem_dir(const char *name)
70667@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
70668
70669 file_ops->mod = mod;
70670
70671- file_ops->id = ftrace_event_id_fops;
70672- file_ops->id.owner = mod;
70673-
70674- file_ops->enable = ftrace_enable_fops;
70675- file_ops->enable.owner = mod;
70676-
70677- file_ops->filter = ftrace_event_filter_fops;
70678- file_ops->filter.owner = mod;
70679-
70680- file_ops->format = ftrace_event_format_fops;
70681- file_ops->format.owner = mod;
70682+ pax_open_kernel();
70683+ *(void **)&mod->trace_id.owner = mod;
70684+ *(void **)&mod->trace_enable.owner = mod;
70685+ *(void **)&mod->trace_filter.owner = mod;
70686+ *(void **)&mod->trace_format.owner = mod;
70687+ pax_close_kernel();
70688
70689 list_add(&file_ops->list, &ftrace_module_file_list);
70690
70691@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
70692 call->mod = mod;
70693 list_add(&call->list, &ftrace_events);
70694 event_create_dir(call, d_events,
70695- &file_ops->id, &file_ops->enable,
70696- &file_ops->filter, &file_ops->format);
70697+ &mod->trace_id, &mod->trace_enable,
70698+ &mod->trace_filter, &mod->trace_format);
70699 }
70700 }
70701
70702diff -urNp linux-2.6.32.48/kernel/trace/trace_mmiotrace.c linux-2.6.32.48/kernel/trace/trace_mmiotrace.c
70703--- linux-2.6.32.48/kernel/trace/trace_mmiotrace.c 2011-11-08 19:02:43.000000000 -0500
70704+++ linux-2.6.32.48/kernel/trace/trace_mmiotrace.c 2011-11-15 19:59:43.000000000 -0500
70705@@ -23,7 +23,7 @@ struct header_iter {
70706 static struct trace_array *mmio_trace_array;
70707 static bool overrun_detected;
70708 static unsigned long prev_overruns;
70709-static atomic_t dropped_count;
70710+static atomic_unchecked_t dropped_count;
70711
70712 static void mmio_reset_data(struct trace_array *tr)
70713 {
70714@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
70715
70716 static unsigned long count_overruns(struct trace_iterator *iter)
70717 {
70718- unsigned long cnt = atomic_xchg(&dropped_count, 0);
70719+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70720 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70721
70722 if (over > prev_overruns)
70723@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
70724 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70725 sizeof(*entry), 0, pc);
70726 if (!event) {
70727- atomic_inc(&dropped_count);
70728+ atomic_inc_unchecked(&dropped_count);
70729 return;
70730 }
70731 entry = ring_buffer_event_data(event);
70732@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
70733 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70734 sizeof(*entry), 0, pc);
70735 if (!event) {
70736- atomic_inc(&dropped_count);
70737+ atomic_inc_unchecked(&dropped_count);
70738 return;
70739 }
70740 entry = ring_buffer_event_data(event);
70741diff -urNp linux-2.6.32.48/kernel/trace/trace_output.c linux-2.6.32.48/kernel/trace/trace_output.c
70742--- linux-2.6.32.48/kernel/trace/trace_output.c 2011-11-08 19:02:43.000000000 -0500
70743+++ linux-2.6.32.48/kernel/trace/trace_output.c 2011-11-15 19:59:43.000000000 -0500
70744@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
70745 return 0;
70746 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70747 if (!IS_ERR(p)) {
70748- p = mangle_path(s->buffer + s->len, p, "\n");
70749+ p = mangle_path(s->buffer + s->len, p, "\n\\");
70750 if (p) {
70751 s->len = p - s->buffer;
70752 return 1;
70753diff -urNp linux-2.6.32.48/kernel/trace/trace_stack.c linux-2.6.32.48/kernel/trace/trace_stack.c
70754--- linux-2.6.32.48/kernel/trace/trace_stack.c 2011-11-08 19:02:43.000000000 -0500
70755+++ linux-2.6.32.48/kernel/trace/trace_stack.c 2011-11-15 19:59:43.000000000 -0500
70756@@ -50,7 +50,7 @@ static inline void check_stack(void)
70757 return;
70758
70759 /* we do not handle interrupt stacks yet */
70760- if (!object_is_on_stack(&this_size))
70761+ if (!object_starts_on_stack(&this_size))
70762 return;
70763
70764 local_irq_save(flags);
70765diff -urNp linux-2.6.32.48/kernel/trace/trace_workqueue.c linux-2.6.32.48/kernel/trace/trace_workqueue.c
70766--- linux-2.6.32.48/kernel/trace/trace_workqueue.c 2011-11-08 19:02:43.000000000 -0500
70767+++ linux-2.6.32.48/kernel/trace/trace_workqueue.c 2011-11-15 19:59:43.000000000 -0500
70768@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
70769 int cpu;
70770 pid_t pid;
70771 /* Can be inserted from interrupt or user context, need to be atomic */
70772- atomic_t inserted;
70773+ atomic_unchecked_t inserted;
70774 /*
70775 * Don't need to be atomic, works are serialized in a single workqueue thread
70776 * on a single CPU.
70777@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
70778 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
70779 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
70780 if (node->pid == wq_thread->pid) {
70781- atomic_inc(&node->inserted);
70782+ atomic_inc_unchecked(&node->inserted);
70783 goto found;
70784 }
70785 }
70786@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
70787 tsk = get_pid_task(pid, PIDTYPE_PID);
70788 if (tsk) {
70789 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
70790- atomic_read(&cws->inserted), cws->executed,
70791+ atomic_read_unchecked(&cws->inserted), cws->executed,
70792 tsk->comm);
70793 put_task_struct(tsk);
70794 }
70795diff -urNp linux-2.6.32.48/kernel/user.c linux-2.6.32.48/kernel/user.c
70796--- linux-2.6.32.48/kernel/user.c 2011-11-08 19:02:43.000000000 -0500
70797+++ linux-2.6.32.48/kernel/user.c 2011-11-15 19:59:43.000000000 -0500
70798@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
70799 spin_lock_irq(&uidhash_lock);
70800 up = uid_hash_find(uid, hashent);
70801 if (up) {
70802+ put_user_ns(ns);
70803 key_put(new->uid_keyring);
70804 key_put(new->session_keyring);
70805 kmem_cache_free(uid_cachep, new);
70806diff -urNp linux-2.6.32.48/lib/bitmap.c linux-2.6.32.48/lib/bitmap.c
70807--- linux-2.6.32.48/lib/bitmap.c 2011-11-08 19:02:43.000000000 -0500
70808+++ linux-2.6.32.48/lib/bitmap.c 2011-11-15 19:59:43.000000000 -0500
70809@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsi
70810 {
70811 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70812 u32 chunk;
70813- const char __user *ubuf = buf;
70814+ const char __user *ubuf = (const char __force_user *)buf;
70815
70816 bitmap_zero(maskp, nmaskbits);
70817
70818@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user
70819 {
70820 if (!access_ok(VERIFY_READ, ubuf, ulen))
70821 return -EFAULT;
70822- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
70823+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
70824 }
70825 EXPORT_SYMBOL(bitmap_parse_user);
70826
70827diff -urNp linux-2.6.32.48/lib/bug.c linux-2.6.32.48/lib/bug.c
70828--- linux-2.6.32.48/lib/bug.c 2011-11-08 19:02:43.000000000 -0500
70829+++ linux-2.6.32.48/lib/bug.c 2011-11-15 19:59:43.000000000 -0500
70830@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
70831 return BUG_TRAP_TYPE_NONE;
70832
70833 bug = find_bug(bugaddr);
70834+ if (!bug)
70835+ return BUG_TRAP_TYPE_NONE;
70836
70837 printk(KERN_EMERG "------------[ cut here ]------------\n");
70838
70839diff -urNp linux-2.6.32.48/lib/debugobjects.c linux-2.6.32.48/lib/debugobjects.c
70840--- linux-2.6.32.48/lib/debugobjects.c 2011-11-08 19:02:43.000000000 -0500
70841+++ linux-2.6.32.48/lib/debugobjects.c 2011-11-15 19:59:43.000000000 -0500
70842@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
70843 if (limit > 4)
70844 return;
70845
70846- is_on_stack = object_is_on_stack(addr);
70847+ is_on_stack = object_starts_on_stack(addr);
70848 if (is_on_stack == onstack)
70849 return;
70850
70851diff -urNp linux-2.6.32.48/lib/devres.c linux-2.6.32.48/lib/devres.c
70852--- linux-2.6.32.48/lib/devres.c 2011-11-08 19:02:43.000000000 -0500
70853+++ linux-2.6.32.48/lib/devres.c 2011-11-15 19:59:43.000000000 -0500
70854@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, vo
70855 {
70856 iounmap(addr);
70857 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70858- (void *)addr));
70859+ (void __force *)addr));
70860 }
70861 EXPORT_SYMBOL(devm_iounmap);
70862
70863@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *de
70864 {
70865 ioport_unmap(addr);
70866 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70867- devm_ioport_map_match, (void *)addr));
70868+ devm_ioport_map_match, (void __force *)addr));
70869 }
70870 EXPORT_SYMBOL(devm_ioport_unmap);
70871
70872diff -urNp linux-2.6.32.48/lib/dma-debug.c linux-2.6.32.48/lib/dma-debug.c
70873--- linux-2.6.32.48/lib/dma-debug.c 2011-11-08 19:02:43.000000000 -0500
70874+++ linux-2.6.32.48/lib/dma-debug.c 2011-11-15 19:59:43.000000000 -0500
70875@@ -861,7 +861,7 @@ out:
70876
70877 static void check_for_stack(struct device *dev, void *addr)
70878 {
70879- if (object_is_on_stack(addr))
70880+ if (object_starts_on_stack(addr))
70881 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70882 "stack [addr=%p]\n", addr);
70883 }
70884diff -urNp linux-2.6.32.48/lib/idr.c linux-2.6.32.48/lib/idr.c
70885--- linux-2.6.32.48/lib/idr.c 2011-11-08 19:02:43.000000000 -0500
70886+++ linux-2.6.32.48/lib/idr.c 2011-11-15 19:59:43.000000000 -0500
70887@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
70888 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
70889
70890 /* if already at the top layer, we need to grow */
70891- if (id >= 1 << (idp->layers * IDR_BITS)) {
70892+ if (id >= (1 << (idp->layers * IDR_BITS))) {
70893 *starting_id = id;
70894 return IDR_NEED_TO_GROW;
70895 }
70896diff -urNp linux-2.6.32.48/lib/inflate.c linux-2.6.32.48/lib/inflate.c
70897--- linux-2.6.32.48/lib/inflate.c 2011-11-08 19:02:43.000000000 -0500
70898+++ linux-2.6.32.48/lib/inflate.c 2011-11-15 19:59:43.000000000 -0500
70899@@ -266,7 +266,7 @@ static void free(void *where)
70900 malloc_ptr = free_mem_ptr;
70901 }
70902 #else
70903-#define malloc(a) kmalloc(a, GFP_KERNEL)
70904+#define malloc(a) kmalloc((a), GFP_KERNEL)
70905 #define free(a) kfree(a)
70906 #endif
70907
70908diff -urNp linux-2.6.32.48/lib/Kconfig.debug linux-2.6.32.48/lib/Kconfig.debug
70909--- linux-2.6.32.48/lib/Kconfig.debug 2011-11-08 19:02:43.000000000 -0500
70910+++ linux-2.6.32.48/lib/Kconfig.debug 2011-11-15 19:59:43.000000000 -0500
70911@@ -905,7 +905,7 @@ config LATENCYTOP
70912 select STACKTRACE
70913 select SCHEDSTATS
70914 select SCHED_DEBUG
70915- depends on HAVE_LATENCYTOP_SUPPORT
70916+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
70917 help
70918 Enable this option if you want to use the LatencyTOP tool
70919 to find out which userspace is blocking on what kernel operations.
70920diff -urNp linux-2.6.32.48/lib/kobject.c linux-2.6.32.48/lib/kobject.c
70921--- linux-2.6.32.48/lib/kobject.c 2011-11-08 19:02:43.000000000 -0500
70922+++ linux-2.6.32.48/lib/kobject.c 2011-11-15 19:59:43.000000000 -0500
70923@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
70924 return ret;
70925 }
70926
70927-struct sysfs_ops kobj_sysfs_ops = {
70928+const struct sysfs_ops kobj_sysfs_ops = {
70929 .show = kobj_attr_show,
70930 .store = kobj_attr_store,
70931 };
70932@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
70933 * If the kset was not able to be created, NULL will be returned.
70934 */
70935 static struct kset *kset_create(const char *name,
70936- struct kset_uevent_ops *uevent_ops,
70937+ const struct kset_uevent_ops *uevent_ops,
70938 struct kobject *parent_kobj)
70939 {
70940 struct kset *kset;
70941@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
70942 * If the kset was not able to be created, NULL will be returned.
70943 */
70944 struct kset *kset_create_and_add(const char *name,
70945- struct kset_uevent_ops *uevent_ops,
70946+ const struct kset_uevent_ops *uevent_ops,
70947 struct kobject *parent_kobj)
70948 {
70949 struct kset *kset;
70950diff -urNp linux-2.6.32.48/lib/kobject_uevent.c linux-2.6.32.48/lib/kobject_uevent.c
70951--- linux-2.6.32.48/lib/kobject_uevent.c 2011-11-08 19:02:43.000000000 -0500
70952+++ linux-2.6.32.48/lib/kobject_uevent.c 2011-11-15 19:59:43.000000000 -0500
70953@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
70954 const char *subsystem;
70955 struct kobject *top_kobj;
70956 struct kset *kset;
70957- struct kset_uevent_ops *uevent_ops;
70958+ const struct kset_uevent_ops *uevent_ops;
70959 u64 seq;
70960 int i = 0;
70961 int retval = 0;
70962diff -urNp linux-2.6.32.48/lib/kref.c linux-2.6.32.48/lib/kref.c
70963--- linux-2.6.32.48/lib/kref.c 2011-11-08 19:02:43.000000000 -0500
70964+++ linux-2.6.32.48/lib/kref.c 2011-11-15 19:59:43.000000000 -0500
70965@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
70966 */
70967 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
70968 {
70969- WARN_ON(release == NULL);
70970+ BUG_ON(release == NULL);
70971 WARN_ON(release == (void (*)(struct kref *))kfree);
70972
70973 if (atomic_dec_and_test(&kref->refcount)) {
70974diff -urNp linux-2.6.32.48/lib/parser.c linux-2.6.32.48/lib/parser.c
70975--- linux-2.6.32.48/lib/parser.c 2011-11-08 19:02:43.000000000 -0500
70976+++ linux-2.6.32.48/lib/parser.c 2011-11-15 19:59:43.000000000 -0500
70977@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
70978 char *buf;
70979 int ret;
70980
70981- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
70982+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
70983 if (!buf)
70984 return -ENOMEM;
70985 memcpy(buf, s->from, s->to - s->from);
70986diff -urNp linux-2.6.32.48/lib/radix-tree.c linux-2.6.32.48/lib/radix-tree.c
70987--- linux-2.6.32.48/lib/radix-tree.c 2011-11-08 19:02:43.000000000 -0500
70988+++ linux-2.6.32.48/lib/radix-tree.c 2011-11-15 19:59:43.000000000 -0500
70989@@ -81,7 +81,7 @@ struct radix_tree_preload {
70990 int nr;
70991 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70992 };
70993-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70994+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70995
70996 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
70997 {
70998diff -urNp linux-2.6.32.48/lib/random32.c linux-2.6.32.48/lib/random32.c
70999--- linux-2.6.32.48/lib/random32.c 2011-11-08 19:02:43.000000000 -0500
71000+++ linux-2.6.32.48/lib/random32.c 2011-11-15 19:59:43.000000000 -0500
71001@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
71002 */
71003 static inline u32 __seed(u32 x, u32 m)
71004 {
71005- return (x < m) ? x + m : x;
71006+ return (x <= m) ? x + m + 1 : x;
71007 }
71008
71009 /**
71010diff -urNp linux-2.6.32.48/lib/vsprintf.c linux-2.6.32.48/lib/vsprintf.c
71011--- linux-2.6.32.48/lib/vsprintf.c 2011-11-08 19:02:43.000000000 -0500
71012+++ linux-2.6.32.48/lib/vsprintf.c 2011-11-15 19:59:43.000000000 -0500
71013@@ -16,6 +16,9 @@
71014 * - scnprintf and vscnprintf
71015 */
71016
71017+#ifdef CONFIG_GRKERNSEC_HIDESYM
71018+#define __INCLUDED_BY_HIDESYM 1
71019+#endif
71020 #include <stdarg.h>
71021 #include <linux/module.h>
71022 #include <linux/types.h>
71023@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
71024 return buf;
71025 }
71026
71027-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
71028+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
71029 {
71030 int len, i;
71031
71032 if ((unsigned long)s < PAGE_SIZE)
71033- s = "<NULL>";
71034+ s = "(null)";
71035
71036 len = strnlen(s, spec.precision);
71037
71038@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
71039 unsigned long value = (unsigned long) ptr;
71040 #ifdef CONFIG_KALLSYMS
71041 char sym[KSYM_SYMBOL_LEN];
71042- if (ext != 'f' && ext != 's')
71043+ if (ext != 'f' && ext != 's' && ext != 'a')
71044 sprint_symbol(sym, value);
71045 else
71046 kallsyms_lookup(value, NULL, NULL, NULL, sym);
71047@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
71048 * - 'f' For simple symbolic function names without offset
71049 * - 'S' For symbolic direct pointers with offset
71050 * - 's' For symbolic direct pointers without offset
71051+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
71052+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
71053 * - 'R' For a struct resource pointer, it prints the range of
71054 * addresses (not the name nor the flags)
71055 * - 'M' For a 6-byte MAC address, it prints the address in the
71056@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
71057 struct printf_spec spec)
71058 {
71059 if (!ptr)
71060- return string(buf, end, "(null)", spec);
71061+ return string(buf, end, "(nil)", spec);
71062
71063 switch (*fmt) {
71064 case 'F':
71065@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
71066 case 's':
71067 /* Fallthrough */
71068 case 'S':
71069+#ifdef CONFIG_GRKERNSEC_HIDESYM
71070+ break;
71071+#else
71072+ return symbol_string(buf, end, ptr, spec, *fmt);
71073+#endif
71074+ case 'a':
71075+ /* Fallthrough */
71076+ case 'A':
71077 return symbol_string(buf, end, ptr, spec, *fmt);
71078 case 'R':
71079 return resource_string(buf, end, ptr, spec);
71080@@ -1445,7 +1458,7 @@ do { \
71081 size_t len;
71082 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
71083 || (unsigned long)save_str < PAGE_SIZE)
71084- save_str = "<NULL>";
71085+ save_str = "(null)";
71086 len = strlen(save_str);
71087 if (str + len + 1 < end)
71088 memcpy(str, save_str, len + 1);
71089@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
71090 typeof(type) value; \
71091 if (sizeof(type) == 8) { \
71092 args = PTR_ALIGN(args, sizeof(u32)); \
71093- *(u32 *)&value = *(u32 *)args; \
71094- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
71095+ *(u32 *)&value = *(const u32 *)args; \
71096+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
71097 } else { \
71098 args = PTR_ALIGN(args, sizeof(type)); \
71099- value = *(typeof(type) *)args; \
71100+ value = *(const typeof(type) *)args; \
71101 } \
71102 args += sizeof(type); \
71103 value; \
71104@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
71105 const char *str_arg = args;
71106 size_t len = strlen(str_arg);
71107 args += len + 1;
71108- str = string(str, end, (char *)str_arg, spec);
71109+ str = string(str, end, str_arg, spec);
71110 break;
71111 }
71112
71113diff -urNp linux-2.6.32.48/localversion-grsec linux-2.6.32.48/localversion-grsec
71114--- linux-2.6.32.48/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
71115+++ linux-2.6.32.48/localversion-grsec 2011-11-15 19:59:43.000000000 -0500
71116@@ -0,0 +1 @@
71117+-grsec
71118diff -urNp linux-2.6.32.48/Makefile linux-2.6.32.48/Makefile
71119--- linux-2.6.32.48/Makefile 2011-11-08 19:02:43.000000000 -0500
71120+++ linux-2.6.32.48/Makefile 2011-11-15 19:59:43.000000000 -0500
71121@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
71122
71123 HOSTCC = gcc
71124 HOSTCXX = g++
71125-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
71126-HOSTCXXFLAGS = -O2
71127+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
71128+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
71129+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
71130
71131 # Decide whether to build built-in, modular, or both.
71132 # Normally, just do built-in.
71133@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
71134 KBUILD_CPPFLAGS := -D__KERNEL__
71135
71136 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
71137+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
71138 -fno-strict-aliasing -fno-common \
71139 -Werror-implicit-function-declaration \
71140 -Wno-format-security \
71141 -fno-delete-null-pointer-checks
71142+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
71143 KBUILD_AFLAGS := -D__ASSEMBLY__
71144
71145 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
71146@@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
71147 # Rules shared between *config targets and build targets
71148
71149 # Basic helpers built in scripts/
71150-PHONY += scripts_basic
71151-scripts_basic:
71152+PHONY += scripts_basic gcc-plugins
71153+scripts_basic: gcc-plugins
71154 $(Q)$(MAKE) $(build)=scripts/basic
71155
71156 # To avoid any implicit rule to kick in, define an empty command.
71157@@ -403,7 +406,7 @@ endif
71158 # of make so .config is not included in this case either (for *config).
71159
71160 no-dot-config-targets := clean mrproper distclean \
71161- cscope TAGS tags help %docs check% \
71162+ cscope gtags TAGS tags help %docs check% \
71163 include/linux/version.h headers_% \
71164 kernelrelease kernelversion
71165
71166@@ -526,6 +529,36 @@ else
71167 KBUILD_CFLAGS += -O2
71168 endif
71169
71170+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
71171+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
71172+ifdef CONFIG_PAX_MEMORY_STACKLEAK
71173+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
71174+endif
71175+ifdef CONFIG_KALLOCSTAT_PLUGIN
71176+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
71177+endif
71178+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
71179+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
71180+endif
71181+ifdef CONFIG_CHECKER_PLUGIN
71182+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
71183+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
71184+endif
71185+endif
71186+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
71187+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
71188+gcc-plugins:
71189+ $(Q)$(MAKE) $(build)=tools/gcc
71190+else
71191+gcc-plugins:
71192+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
71193+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
71194+else
71195+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
71196+endif
71197+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
71198+endif
71199+
71200 include $(srctree)/arch/$(SRCARCH)/Makefile
71201
71202 ifneq ($(CONFIG_FRAME_WARN),0)
71203@@ -644,7 +677,7 @@ export mod_strip_cmd
71204
71205
71206 ifeq ($(KBUILD_EXTMOD),)
71207-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
71208+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
71209
71210 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
71211 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
71212@@ -865,6 +898,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
71213
71214 # The actual objects are generated when descending,
71215 # make sure no implicit rule kicks in
71216+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
71217 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
71218
71219 # Handle descending into subdirectories listed in $(vmlinux-dirs)
71220@@ -874,7 +908,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
71221 # Error messages still appears in the original language
71222
71223 PHONY += $(vmlinux-dirs)
71224-$(vmlinux-dirs): prepare scripts
71225+$(vmlinux-dirs): gcc-plugins prepare scripts
71226 $(Q)$(MAKE) $(build)=$@
71227
71228 # Build the kernel release string
71229@@ -983,6 +1017,7 @@ prepare0: archprepare FORCE
71230 $(Q)$(MAKE) $(build)=. missing-syscalls
71231
71232 # All the preparing..
71233+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
71234 prepare: prepare0
71235
71236 # The asm symlink changes when $(ARCH) changes.
71237@@ -1124,6 +1159,7 @@ all: modules
71238 # using awk while concatenating to the final file.
71239
71240 PHONY += modules
71241+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
71242 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
71243 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
71244 @$(kecho) ' Building modules, stage 2.';
71245@@ -1133,7 +1169,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_B
71246
71247 # Target to prepare building external modules
71248 PHONY += modules_prepare
71249-modules_prepare: prepare scripts
71250+modules_prepare: gcc-plugins prepare scripts
71251
71252 # Target to install modules
71253 PHONY += modules_install
71254@@ -1198,7 +1234,7 @@ MRPROPER_FILES += .config .config.old in
71255 include/linux/autoconf.h include/linux/version.h \
71256 include/linux/utsrelease.h \
71257 include/linux/bounds.h include/asm*/asm-offsets.h \
71258- Module.symvers Module.markers tags TAGS cscope*
71259+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
71260
71261 # clean - Delete most, but leave enough to build external modules
71262 #
71263@@ -1242,7 +1278,7 @@ distclean: mrproper
71264 @find $(srctree) $(RCS_FIND_IGNORE) \
71265 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
71266 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
71267- -o -name '.*.rej' -o -size 0 \
71268+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
71269 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
71270 -type f -print | xargs rm -f
71271
71272@@ -1289,6 +1325,7 @@ help:
71273 @echo ' modules_prepare - Set up for building external modules'
71274 @echo ' tags/TAGS - Generate tags file for editors'
71275 @echo ' cscope - Generate cscope index'
71276+ @echo ' gtags - Generate GNU GLOBAL index'
71277 @echo ' kernelrelease - Output the release version string'
71278 @echo ' kernelversion - Output the version stored in Makefile'
71279 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
71280@@ -1390,6 +1427,7 @@ PHONY += $(module-dirs) modules
71281 $(module-dirs): crmodverdir $(objtree)/Module.symvers
71282 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
71283
71284+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
71285 modules: $(module-dirs)
71286 @$(kecho) ' Building modules, stage 2.';
71287 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
71288@@ -1445,7 +1483,7 @@ endif # KBUILD_EXTMOD
71289 quiet_cmd_tags = GEN $@
71290 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
71291
71292-tags TAGS cscope: FORCE
71293+tags TAGS cscope gtags: FORCE
71294 $(call cmd,tags)
71295
71296 # Scripts to check various things for consistency
71297@@ -1510,17 +1548,19 @@ else
71298 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
71299 endif
71300
71301-%.s: %.c prepare scripts FORCE
71302+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
71303+%.s: %.c gcc-plugins prepare scripts FORCE
71304 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71305 %.i: %.c prepare scripts FORCE
71306 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71307-%.o: %.c prepare scripts FORCE
71308+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
71309+%.o: %.c gcc-plugins prepare scripts FORCE
71310 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71311 %.lst: %.c prepare scripts FORCE
71312 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71313-%.s: %.S prepare scripts FORCE
71314+%.s: %.S gcc-plugins prepare scripts FORCE
71315 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71316-%.o: %.S prepare scripts FORCE
71317+%.o: %.S gcc-plugins prepare scripts FORCE
71318 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71319 %.symtypes: %.c prepare scripts FORCE
71320 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
71321@@ -1530,11 +1570,13 @@ endif
71322 $(cmd_crmodverdir)
71323 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
71324 $(build)=$(build-dir)
71325-%/: prepare scripts FORCE
71326+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
71327+%/: gcc-plugins prepare scripts FORCE
71328 $(cmd_crmodverdir)
71329 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
71330 $(build)=$(build-dir)
71331-%.ko: prepare scripts FORCE
71332+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
71333+%.ko: gcc-plugins prepare scripts FORCE
71334 $(cmd_crmodverdir)
71335 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
71336 $(build)=$(build-dir) $(@:.ko=.o)
71337diff -urNp linux-2.6.32.48/mm/backing-dev.c linux-2.6.32.48/mm/backing-dev.c
71338--- linux-2.6.32.48/mm/backing-dev.c 2011-11-08 19:02:43.000000000 -0500
71339+++ linux-2.6.32.48/mm/backing-dev.c 2011-11-15 19:59:43.000000000 -0500
71340@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
71341 list_add_tail_rcu(&wb->list, &bdi->wb_list);
71342 spin_unlock(&bdi->wb_lock);
71343
71344- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
71345+ tsk->flags |= PF_SWAPWRITE;
71346 set_freezable();
71347
71348 /*
71349@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
71350 * Add the default flusher task that gets created for any bdi
71351 * that has dirty data pending writeout
71352 */
71353-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
71354+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
71355 {
71356 if (!bdi_cap_writeback_dirty(bdi))
71357 return;
71358diff -urNp linux-2.6.32.48/mm/filemap.c linux-2.6.32.48/mm/filemap.c
71359--- linux-2.6.32.48/mm/filemap.c 2011-11-08 19:02:43.000000000 -0500
71360+++ linux-2.6.32.48/mm/filemap.c 2011-11-15 19:59:43.000000000 -0500
71361@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
71362 struct address_space *mapping = file->f_mapping;
71363
71364 if (!mapping->a_ops->readpage)
71365- return -ENOEXEC;
71366+ return -ENODEV;
71367 file_accessed(file);
71368 vma->vm_ops = &generic_file_vm_ops;
71369 vma->vm_flags |= VM_CAN_NONLINEAR;
71370@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
71371 *pos = i_size_read(inode);
71372
71373 if (limit != RLIM_INFINITY) {
71374+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71375 if (*pos >= limit) {
71376 send_sig(SIGXFSZ, current, 0);
71377 return -EFBIG;
71378diff -urNp linux-2.6.32.48/mm/fremap.c linux-2.6.32.48/mm/fremap.c
71379--- linux-2.6.32.48/mm/fremap.c 2011-11-08 19:02:43.000000000 -0500
71380+++ linux-2.6.32.48/mm/fremap.c 2011-11-15 19:59:43.000000000 -0500
71381@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
71382 retry:
71383 vma = find_vma(mm, start);
71384
71385+#ifdef CONFIG_PAX_SEGMEXEC
71386+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71387+ goto out;
71388+#endif
71389+
71390 /*
71391 * Make sure the vma is shared, that it supports prefaulting,
71392 * and that the remapped range is valid and fully within
71393@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
71394 /*
71395 * drop PG_Mlocked flag for over-mapped range
71396 */
71397- unsigned int saved_flags = vma->vm_flags;
71398+ unsigned long saved_flags = vma->vm_flags;
71399 munlock_vma_pages_range(vma, start, start + size);
71400 vma->vm_flags = saved_flags;
71401 }
71402diff -urNp linux-2.6.32.48/mm/highmem.c linux-2.6.32.48/mm/highmem.c
71403--- linux-2.6.32.48/mm/highmem.c 2011-11-08 19:02:43.000000000 -0500
71404+++ linux-2.6.32.48/mm/highmem.c 2011-11-15 19:59:43.000000000 -0500
71405@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
71406 * So no dangers, even with speculative execution.
71407 */
71408 page = pte_page(pkmap_page_table[i]);
71409+ pax_open_kernel();
71410 pte_clear(&init_mm, (unsigned long)page_address(page),
71411 &pkmap_page_table[i]);
71412-
71413+ pax_close_kernel();
71414 set_page_address(page, NULL);
71415 need_flush = 1;
71416 }
71417@@ -177,9 +178,11 @@ start:
71418 }
71419 }
71420 vaddr = PKMAP_ADDR(last_pkmap_nr);
71421+
71422+ pax_open_kernel();
71423 set_pte_at(&init_mm, vaddr,
71424 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
71425-
71426+ pax_close_kernel();
71427 pkmap_count[last_pkmap_nr] = 1;
71428 set_page_address(page, (void *)vaddr);
71429
71430diff -urNp linux-2.6.32.48/mm/hugetlb.c linux-2.6.32.48/mm/hugetlb.c
71431--- linux-2.6.32.48/mm/hugetlb.c 2011-11-08 19:02:43.000000000 -0500
71432+++ linux-2.6.32.48/mm/hugetlb.c 2011-11-15 19:59:43.000000000 -0500
71433@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
71434 return 1;
71435 }
71436
71437+#ifdef CONFIG_PAX_SEGMEXEC
71438+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71439+{
71440+ struct mm_struct *mm = vma->vm_mm;
71441+ struct vm_area_struct *vma_m;
71442+ unsigned long address_m;
71443+ pte_t *ptep_m;
71444+
71445+ vma_m = pax_find_mirror_vma(vma);
71446+ if (!vma_m)
71447+ return;
71448+
71449+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71450+ address_m = address + SEGMEXEC_TASK_SIZE;
71451+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71452+ get_page(page_m);
71453+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71454+}
71455+#endif
71456+
71457 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
71458 unsigned long address, pte_t *ptep, pte_t pte,
71459 struct page *pagecache_page)
71460@@ -2004,6 +2024,11 @@ retry_avoidcopy:
71461 huge_ptep_clear_flush(vma, address, ptep);
71462 set_huge_pte_at(mm, address, ptep,
71463 make_huge_pte(vma, new_page, 1));
71464+
71465+#ifdef CONFIG_PAX_SEGMEXEC
71466+ pax_mirror_huge_pte(vma, address, new_page);
71467+#endif
71468+
71469 /* Make the old page be freed below */
71470 new_page = old_page;
71471 }
71472@@ -2135,6 +2160,10 @@ retry:
71473 && (vma->vm_flags & VM_SHARED)));
71474 set_huge_pte_at(mm, address, ptep, new_pte);
71475
71476+#ifdef CONFIG_PAX_SEGMEXEC
71477+ pax_mirror_huge_pte(vma, address, page);
71478+#endif
71479+
71480 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71481 /* Optimization, do the COW without a second fault */
71482 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
71483@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
71484 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71485 struct hstate *h = hstate_vma(vma);
71486
71487+#ifdef CONFIG_PAX_SEGMEXEC
71488+ struct vm_area_struct *vma_m;
71489+
71490+ vma_m = pax_find_mirror_vma(vma);
71491+ if (vma_m) {
71492+ unsigned long address_m;
71493+
71494+ if (vma->vm_start > vma_m->vm_start) {
71495+ address_m = address;
71496+ address -= SEGMEXEC_TASK_SIZE;
71497+ vma = vma_m;
71498+ h = hstate_vma(vma);
71499+ } else
71500+ address_m = address + SEGMEXEC_TASK_SIZE;
71501+
71502+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71503+ return VM_FAULT_OOM;
71504+ address_m &= HPAGE_MASK;
71505+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71506+ }
71507+#endif
71508+
71509 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71510 if (!ptep)
71511 return VM_FAULT_OOM;
71512diff -urNp linux-2.6.32.48/mm/internal.h linux-2.6.32.48/mm/internal.h
71513--- linux-2.6.32.48/mm/internal.h 2011-11-08 19:02:43.000000000 -0500
71514+++ linux-2.6.32.48/mm/internal.h 2011-11-15 19:59:43.000000000 -0500
71515@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
71516 * in mm/page_alloc.c
71517 */
71518 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71519+extern void free_compound_page(struct page *page);
71520 extern void prep_compound_page(struct page *page, unsigned long order);
71521
71522
71523diff -urNp linux-2.6.32.48/mm/Kconfig linux-2.6.32.48/mm/Kconfig
71524--- linux-2.6.32.48/mm/Kconfig 2011-11-08 19:02:43.000000000 -0500
71525+++ linux-2.6.32.48/mm/Kconfig 2011-11-15 19:59:43.000000000 -0500
71526@@ -228,7 +228,7 @@ config KSM
71527 config DEFAULT_MMAP_MIN_ADDR
71528 int "Low address space to protect from user allocation"
71529 depends on MMU
71530- default 4096
71531+ default 65536
71532 help
71533 This is the portion of low virtual memory which should be protected
71534 from userspace allocation. Keeping a user from writing to low pages
71535diff -urNp linux-2.6.32.48/mm/kmemleak.c linux-2.6.32.48/mm/kmemleak.c
71536--- linux-2.6.32.48/mm/kmemleak.c 2011-11-08 19:02:43.000000000 -0500
71537+++ linux-2.6.32.48/mm/kmemleak.c 2011-11-15 19:59:43.000000000 -0500
71538@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
71539
71540 for (i = 0; i < object->trace_len; i++) {
71541 void *ptr = (void *)object->trace[i];
71542- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71543+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71544 }
71545 }
71546
71547diff -urNp linux-2.6.32.48/mm/maccess.c linux-2.6.32.48/mm/maccess.c
71548--- linux-2.6.32.48/mm/maccess.c 2011-11-08 19:02:43.000000000 -0500
71549+++ linux-2.6.32.48/mm/maccess.c 2011-11-15 19:59:43.000000000 -0500
71550@@ -14,7 +14,7 @@
71551 * Safely read from address @src to the buffer at @dst. If a kernel fault
71552 * happens, handle that and return -EFAULT.
71553 */
71554-long probe_kernel_read(void *dst, void *src, size_t size)
71555+long probe_kernel_read(void *dst, const void *src, size_t size)
71556 {
71557 long ret;
71558 mm_segment_t old_fs = get_fs();
71559@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *
71560 set_fs(KERNEL_DS);
71561 pagefault_disable();
71562 ret = __copy_from_user_inatomic(dst,
71563- (__force const void __user *)src, size);
71564+ (const void __force_user *)src, size);
71565 pagefault_enable();
71566 set_fs(old_fs);
71567
71568@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
71569 * Safely write to address @dst from the buffer at @src. If a kernel fault
71570 * happens, handle that and return -EFAULT.
71571 */
71572-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
71573+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
71574 {
71575 long ret;
71576 mm_segment_t old_fs = get_fs();
71577
71578 set_fs(KERNEL_DS);
71579 pagefault_disable();
71580- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71581+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71582 pagefault_enable();
71583 set_fs(old_fs);
71584
71585diff -urNp linux-2.6.32.48/mm/madvise.c linux-2.6.32.48/mm/madvise.c
71586--- linux-2.6.32.48/mm/madvise.c 2011-11-08 19:02:43.000000000 -0500
71587+++ linux-2.6.32.48/mm/madvise.c 2011-11-15 19:59:43.000000000 -0500
71588@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
71589 pgoff_t pgoff;
71590 unsigned long new_flags = vma->vm_flags;
71591
71592+#ifdef CONFIG_PAX_SEGMEXEC
71593+ struct vm_area_struct *vma_m;
71594+#endif
71595+
71596 switch (behavior) {
71597 case MADV_NORMAL:
71598 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71599@@ -103,6 +107,13 @@ success:
71600 /*
71601 * vm_flags is protected by the mmap_sem held in write mode.
71602 */
71603+
71604+#ifdef CONFIG_PAX_SEGMEXEC
71605+ vma_m = pax_find_mirror_vma(vma);
71606+ if (vma_m)
71607+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71608+#endif
71609+
71610 vma->vm_flags = new_flags;
71611
71612 out:
71613@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
71614 struct vm_area_struct ** prev,
71615 unsigned long start, unsigned long end)
71616 {
71617+
71618+#ifdef CONFIG_PAX_SEGMEXEC
71619+ struct vm_area_struct *vma_m;
71620+#endif
71621+
71622 *prev = vma;
71623 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71624 return -EINVAL;
71625@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
71626 zap_page_range(vma, start, end - start, &details);
71627 } else
71628 zap_page_range(vma, start, end - start, NULL);
71629+
71630+#ifdef CONFIG_PAX_SEGMEXEC
71631+ vma_m = pax_find_mirror_vma(vma);
71632+ if (vma_m) {
71633+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71634+ struct zap_details details = {
71635+ .nonlinear_vma = vma_m,
71636+ .last_index = ULONG_MAX,
71637+ };
71638+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71639+ } else
71640+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71641+ }
71642+#endif
71643+
71644 return 0;
71645 }
71646
71647@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
71648 if (end < start)
71649 goto out;
71650
71651+#ifdef CONFIG_PAX_SEGMEXEC
71652+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71653+ if (end > SEGMEXEC_TASK_SIZE)
71654+ goto out;
71655+ } else
71656+#endif
71657+
71658+ if (end > TASK_SIZE)
71659+ goto out;
71660+
71661 error = 0;
71662 if (end == start)
71663 goto out;
71664diff -urNp linux-2.6.32.48/mm/memory.c linux-2.6.32.48/mm/memory.c
71665--- linux-2.6.32.48/mm/memory.c 2011-11-08 19:02:43.000000000 -0500
71666+++ linux-2.6.32.48/mm/memory.c 2011-11-15 19:59:43.000000000 -0500
71667@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
71668 return;
71669
71670 pmd = pmd_offset(pud, start);
71671+
71672+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71673 pud_clear(pud);
71674 pmd_free_tlb(tlb, pmd, start);
71675+#endif
71676+
71677 }
71678
71679 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71680@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
71681 if (end - 1 > ceiling - 1)
71682 return;
71683
71684+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71685 pud = pud_offset(pgd, start);
71686 pgd_clear(pgd);
71687 pud_free_tlb(tlb, pud, start);
71688+#endif
71689+
71690 }
71691
71692 /*
71693@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
71694 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71695 i = 0;
71696
71697- do {
71698+ while (nr_pages) {
71699 struct vm_area_struct *vma;
71700
71701- vma = find_extend_vma(mm, start);
71702+ vma = find_vma(mm, start);
71703 if (!vma && in_gate_area(tsk, start)) {
71704 unsigned long pg = start & PAGE_MASK;
71705 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
71706@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
71707 continue;
71708 }
71709
71710- if (!vma ||
71711+ if (!vma || start < vma->vm_start ||
71712 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71713 !(vm_flags & vma->vm_flags))
71714 return i ? : -EFAULT;
71715@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
71716 start += PAGE_SIZE;
71717 nr_pages--;
71718 } while (nr_pages && start < vma->vm_end);
71719- } while (nr_pages);
71720+ }
71721 return i;
71722 }
71723
71724@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
71725 page_add_file_rmap(page);
71726 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71727
71728+#ifdef CONFIG_PAX_SEGMEXEC
71729+ pax_mirror_file_pte(vma, addr, page, ptl);
71730+#endif
71731+
71732 retval = 0;
71733 pte_unmap_unlock(pte, ptl);
71734 return retval;
71735@@ -1560,10 +1571,22 @@ out:
71736 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71737 struct page *page)
71738 {
71739+
71740+#ifdef CONFIG_PAX_SEGMEXEC
71741+ struct vm_area_struct *vma_m;
71742+#endif
71743+
71744 if (addr < vma->vm_start || addr >= vma->vm_end)
71745 return -EFAULT;
71746 if (!page_count(page))
71747 return -EINVAL;
71748+
71749+#ifdef CONFIG_PAX_SEGMEXEC
71750+ vma_m = pax_find_mirror_vma(vma);
71751+ if (vma_m)
71752+ vma_m->vm_flags |= VM_INSERTPAGE;
71753+#endif
71754+
71755 vma->vm_flags |= VM_INSERTPAGE;
71756 return insert_page(vma, addr, page, vma->vm_page_prot);
71757 }
71758@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
71759 unsigned long pfn)
71760 {
71761 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71762+ BUG_ON(vma->vm_mirror);
71763
71764 if (addr < vma->vm_start || addr >= vma->vm_end)
71765 return -EFAULT;
71766@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
71767 copy_user_highpage(dst, src, va, vma);
71768 }
71769
71770+#ifdef CONFIG_PAX_SEGMEXEC
71771+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71772+{
71773+ struct mm_struct *mm = vma->vm_mm;
71774+ spinlock_t *ptl;
71775+ pte_t *pte, entry;
71776+
71777+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71778+ entry = *pte;
71779+ if (!pte_present(entry)) {
71780+ if (!pte_none(entry)) {
71781+ BUG_ON(pte_file(entry));
71782+ free_swap_and_cache(pte_to_swp_entry(entry));
71783+ pte_clear_not_present_full(mm, address, pte, 0);
71784+ }
71785+ } else {
71786+ struct page *page;
71787+
71788+ flush_cache_page(vma, address, pte_pfn(entry));
71789+ entry = ptep_clear_flush(vma, address, pte);
71790+ BUG_ON(pte_dirty(entry));
71791+ page = vm_normal_page(vma, address, entry);
71792+ if (page) {
71793+ update_hiwater_rss(mm);
71794+ if (PageAnon(page))
71795+ dec_mm_counter(mm, anon_rss);
71796+ else
71797+ dec_mm_counter(mm, file_rss);
71798+ page_remove_rmap(page);
71799+ page_cache_release(page);
71800+ }
71801+ }
71802+ pte_unmap_unlock(pte, ptl);
71803+}
71804+
71805+/* PaX: if vma is mirrored, synchronize the mirror's PTE
71806+ *
71807+ * the ptl of the lower mapped page is held on entry and is not released on exit
71808+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71809+ */
71810+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71811+{
71812+ struct mm_struct *mm = vma->vm_mm;
71813+ unsigned long address_m;
71814+ spinlock_t *ptl_m;
71815+ struct vm_area_struct *vma_m;
71816+ pmd_t *pmd_m;
71817+ pte_t *pte_m, entry_m;
71818+
71819+ BUG_ON(!page_m || !PageAnon(page_m));
71820+
71821+ vma_m = pax_find_mirror_vma(vma);
71822+ if (!vma_m)
71823+ return;
71824+
71825+ BUG_ON(!PageLocked(page_m));
71826+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71827+ address_m = address + SEGMEXEC_TASK_SIZE;
71828+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71829+ pte_m = pte_offset_map_nested(pmd_m, address_m);
71830+ ptl_m = pte_lockptr(mm, pmd_m);
71831+ if (ptl != ptl_m) {
71832+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71833+ if (!pte_none(*pte_m))
71834+ goto out;
71835+ }
71836+
71837+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71838+ page_cache_get(page_m);
71839+ page_add_anon_rmap(page_m, vma_m, address_m);
71840+ inc_mm_counter(mm, anon_rss);
71841+ set_pte_at(mm, address_m, pte_m, entry_m);
71842+ update_mmu_cache(vma_m, address_m, entry_m);
71843+out:
71844+ if (ptl != ptl_m)
71845+ spin_unlock(ptl_m);
71846+ pte_unmap_nested(pte_m);
71847+ unlock_page(page_m);
71848+}
71849+
71850+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71851+{
71852+ struct mm_struct *mm = vma->vm_mm;
71853+ unsigned long address_m;
71854+ spinlock_t *ptl_m;
71855+ struct vm_area_struct *vma_m;
71856+ pmd_t *pmd_m;
71857+ pte_t *pte_m, entry_m;
71858+
71859+ BUG_ON(!page_m || PageAnon(page_m));
71860+
71861+ vma_m = pax_find_mirror_vma(vma);
71862+ if (!vma_m)
71863+ return;
71864+
71865+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71866+ address_m = address + SEGMEXEC_TASK_SIZE;
71867+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71868+ pte_m = pte_offset_map_nested(pmd_m, address_m);
71869+ ptl_m = pte_lockptr(mm, pmd_m);
71870+ if (ptl != ptl_m) {
71871+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71872+ if (!pte_none(*pte_m))
71873+ goto out;
71874+ }
71875+
71876+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71877+ page_cache_get(page_m);
71878+ page_add_file_rmap(page_m);
71879+ inc_mm_counter(mm, file_rss);
71880+ set_pte_at(mm, address_m, pte_m, entry_m);
71881+ update_mmu_cache(vma_m, address_m, entry_m);
71882+out:
71883+ if (ptl != ptl_m)
71884+ spin_unlock(ptl_m);
71885+ pte_unmap_nested(pte_m);
71886+}
71887+
71888+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71889+{
71890+ struct mm_struct *mm = vma->vm_mm;
71891+ unsigned long address_m;
71892+ spinlock_t *ptl_m;
71893+ struct vm_area_struct *vma_m;
71894+ pmd_t *pmd_m;
71895+ pte_t *pte_m, entry_m;
71896+
71897+ vma_m = pax_find_mirror_vma(vma);
71898+ if (!vma_m)
71899+ return;
71900+
71901+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71902+ address_m = address + SEGMEXEC_TASK_SIZE;
71903+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71904+ pte_m = pte_offset_map_nested(pmd_m, address_m);
71905+ ptl_m = pte_lockptr(mm, pmd_m);
71906+ if (ptl != ptl_m) {
71907+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71908+ if (!pte_none(*pte_m))
71909+ goto out;
71910+ }
71911+
71912+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
71913+ set_pte_at(mm, address_m, pte_m, entry_m);
71914+out:
71915+ if (ptl != ptl_m)
71916+ spin_unlock(ptl_m);
71917+ pte_unmap_nested(pte_m);
71918+}
71919+
71920+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
71921+{
71922+ struct page *page_m;
71923+ pte_t entry;
71924+
71925+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
71926+ goto out;
71927+
71928+ entry = *pte;
71929+ page_m = vm_normal_page(vma, address, entry);
71930+ if (!page_m)
71931+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
71932+ else if (PageAnon(page_m)) {
71933+ if (pax_find_mirror_vma(vma)) {
71934+ pte_unmap_unlock(pte, ptl);
71935+ lock_page(page_m);
71936+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
71937+ if (pte_same(entry, *pte))
71938+ pax_mirror_anon_pte(vma, address, page_m, ptl);
71939+ else
71940+ unlock_page(page_m);
71941+ }
71942+ } else
71943+ pax_mirror_file_pte(vma, address, page_m, ptl);
71944+
71945+out:
71946+ pte_unmap_unlock(pte, ptl);
71947+}
71948+#endif
71949+
71950 /*
71951 * This routine handles present pages, when users try to write
71952 * to a shared page. It is done by copying the page to a new address
71953@@ -2156,6 +2360,12 @@ gotten:
71954 */
71955 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71956 if (likely(pte_same(*page_table, orig_pte))) {
71957+
71958+#ifdef CONFIG_PAX_SEGMEXEC
71959+ if (pax_find_mirror_vma(vma))
71960+ BUG_ON(!trylock_page(new_page));
71961+#endif
71962+
71963 if (old_page) {
71964 if (!PageAnon(old_page)) {
71965 dec_mm_counter(mm, file_rss);
71966@@ -2207,6 +2417,10 @@ gotten:
71967 page_remove_rmap(old_page);
71968 }
71969
71970+#ifdef CONFIG_PAX_SEGMEXEC
71971+ pax_mirror_anon_pte(vma, address, new_page, ptl);
71972+#endif
71973+
71974 /* Free the old page.. */
71975 new_page = old_page;
71976 ret |= VM_FAULT_WRITE;
71977@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
71978 swap_free(entry);
71979 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
71980 try_to_free_swap(page);
71981+
71982+#ifdef CONFIG_PAX_SEGMEXEC
71983+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
71984+#endif
71985+
71986 unlock_page(page);
71987
71988 if (flags & FAULT_FLAG_WRITE) {
71989@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
71990
71991 /* No need to invalidate - it was non-present before */
71992 update_mmu_cache(vma, address, pte);
71993+
71994+#ifdef CONFIG_PAX_SEGMEXEC
71995+ pax_mirror_anon_pte(vma, address, page, ptl);
71996+#endif
71997+
71998 unlock:
71999 pte_unmap_unlock(page_table, ptl);
72000 out:
72001@@ -2632,40 +2856,6 @@ out_release:
72002 }
72003
72004 /*
72005- * This is like a special single-page "expand_{down|up}wards()",
72006- * except we must first make sure that 'address{-|+}PAGE_SIZE'
72007- * doesn't hit another vma.
72008- */
72009-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
72010-{
72011- address &= PAGE_MASK;
72012- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
72013- struct vm_area_struct *prev = vma->vm_prev;
72014-
72015- /*
72016- * Is there a mapping abutting this one below?
72017- *
72018- * That's only ok if it's the same stack mapping
72019- * that has gotten split..
72020- */
72021- if (prev && prev->vm_end == address)
72022- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
72023-
72024- expand_stack(vma, address - PAGE_SIZE);
72025- }
72026- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
72027- struct vm_area_struct *next = vma->vm_next;
72028-
72029- /* As VM_GROWSDOWN but s/below/above/ */
72030- if (next && next->vm_start == address + PAGE_SIZE)
72031- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
72032-
72033- expand_upwards(vma, address + PAGE_SIZE);
72034- }
72035- return 0;
72036-}
72037-
72038-/*
72039 * We enter with non-exclusive mmap_sem (to exclude vma changes,
72040 * but allow concurrent faults), and pte mapped but not yet locked.
72041 * We return with mmap_sem still held, but pte unmapped and unlocked.
72042@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
72043 unsigned long address, pte_t *page_table, pmd_t *pmd,
72044 unsigned int flags)
72045 {
72046- struct page *page;
72047+ struct page *page = NULL;
72048 spinlock_t *ptl;
72049 pte_t entry;
72050
72051- pte_unmap(page_table);
72052-
72053- /* Check if we need to add a guard page to the stack */
72054- if (check_stack_guard_page(vma, address) < 0)
72055- return VM_FAULT_SIGBUS;
72056-
72057- /* Use the zero-page for reads */
72058 if (!(flags & FAULT_FLAG_WRITE)) {
72059 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
72060 vma->vm_page_prot));
72061- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72062+ ptl = pte_lockptr(mm, pmd);
72063+ spin_lock(ptl);
72064 if (!pte_none(*page_table))
72065 goto unlock;
72066 goto setpte;
72067 }
72068
72069 /* Allocate our own private page. */
72070+ pte_unmap(page_table);
72071+
72072 if (unlikely(anon_vma_prepare(vma)))
72073 goto oom;
72074 page = alloc_zeroed_user_highpage_movable(vma, address);
72075@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
72076 if (!pte_none(*page_table))
72077 goto release;
72078
72079+#ifdef CONFIG_PAX_SEGMEXEC
72080+ if (pax_find_mirror_vma(vma))
72081+ BUG_ON(!trylock_page(page));
72082+#endif
72083+
72084 inc_mm_counter(mm, anon_rss);
72085 page_add_new_anon_rmap(page, vma, address);
72086 setpte:
72087@@ -2720,6 +2911,12 @@ setpte:
72088
72089 /* No need to invalidate - it was non-present before */
72090 update_mmu_cache(vma, address, entry);
72091+
72092+#ifdef CONFIG_PAX_SEGMEXEC
72093+ if (page)
72094+ pax_mirror_anon_pte(vma, address, page, ptl);
72095+#endif
72096+
72097 unlock:
72098 pte_unmap_unlock(page_table, ptl);
72099 return 0;
72100@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
72101 */
72102 /* Only go through if we didn't race with anybody else... */
72103 if (likely(pte_same(*page_table, orig_pte))) {
72104+
72105+#ifdef CONFIG_PAX_SEGMEXEC
72106+ if (anon && pax_find_mirror_vma(vma))
72107+ BUG_ON(!trylock_page(page));
72108+#endif
72109+
72110 flush_icache_page(vma, page);
72111 entry = mk_pte(page, vma->vm_page_prot);
72112 if (flags & FAULT_FLAG_WRITE)
72113@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
72114
72115 /* no need to invalidate: a not-present page won't be cached */
72116 update_mmu_cache(vma, address, entry);
72117+
72118+#ifdef CONFIG_PAX_SEGMEXEC
72119+ if (anon)
72120+ pax_mirror_anon_pte(vma, address, page, ptl);
72121+ else
72122+ pax_mirror_file_pte(vma, address, page, ptl);
72123+#endif
72124+
72125 } else {
72126 if (charged)
72127 mem_cgroup_uncharge_page(page);
72128@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
72129 if (flags & FAULT_FLAG_WRITE)
72130 flush_tlb_page(vma, address);
72131 }
72132+
72133+#ifdef CONFIG_PAX_SEGMEXEC
72134+ pax_mirror_pte(vma, address, pte, pmd, ptl);
72135+ return 0;
72136+#endif
72137+
72138 unlock:
72139 pte_unmap_unlock(pte, ptl);
72140 return 0;
72141@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
72142 pmd_t *pmd;
72143 pte_t *pte;
72144
72145+#ifdef CONFIG_PAX_SEGMEXEC
72146+ struct vm_area_struct *vma_m;
72147+#endif
72148+
72149 __set_current_state(TASK_RUNNING);
72150
72151 count_vm_event(PGFAULT);
72152@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
72153 if (unlikely(is_vm_hugetlb_page(vma)))
72154 return hugetlb_fault(mm, vma, address, flags);
72155
72156+#ifdef CONFIG_PAX_SEGMEXEC
72157+ vma_m = pax_find_mirror_vma(vma);
72158+ if (vma_m) {
72159+ unsigned long address_m;
72160+ pgd_t *pgd_m;
72161+ pud_t *pud_m;
72162+ pmd_t *pmd_m;
72163+
72164+ if (vma->vm_start > vma_m->vm_start) {
72165+ address_m = address;
72166+ address -= SEGMEXEC_TASK_SIZE;
72167+ vma = vma_m;
72168+ } else
72169+ address_m = address + SEGMEXEC_TASK_SIZE;
72170+
72171+ pgd_m = pgd_offset(mm, address_m);
72172+ pud_m = pud_alloc(mm, pgd_m, address_m);
72173+ if (!pud_m)
72174+ return VM_FAULT_OOM;
72175+ pmd_m = pmd_alloc(mm, pud_m, address_m);
72176+ if (!pmd_m)
72177+ return VM_FAULT_OOM;
72178+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
72179+ return VM_FAULT_OOM;
72180+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
72181+ }
72182+#endif
72183+
72184 pgd = pgd_offset(mm, address);
72185 pud = pud_alloc(mm, pgd, address);
72186 if (!pud)
72187@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
72188 gate_vma.vm_start = FIXADDR_USER_START;
72189 gate_vma.vm_end = FIXADDR_USER_END;
72190 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72191- gate_vma.vm_page_prot = __P101;
72192+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72193 /*
72194 * Make sure the vDSO gets into every core dump.
72195 * Dumping its contents makes post-mortem fully interpretable later
72196diff -urNp linux-2.6.32.48/mm/memory-failure.c linux-2.6.32.48/mm/memory-failure.c
72197--- linux-2.6.32.48/mm/memory-failure.c 2011-11-08 19:02:43.000000000 -0500
72198+++ linux-2.6.32.48/mm/memory-failure.c 2011-11-15 19:59:43.000000000 -0500
72199@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
72200
72201 int sysctl_memory_failure_recovery __read_mostly = 1;
72202
72203-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
72204+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
72205
72206 /*
72207 * Send all the processes who have the page mapped an ``action optional''
72208@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_stru
72209 si.si_signo = SIGBUS;
72210 si.si_errno = 0;
72211 si.si_code = BUS_MCEERR_AO;
72212- si.si_addr = (void *)addr;
72213+ si.si_addr = (void __user *)addr;
72214 #ifdef __ARCH_SI_TRAPNO
72215 si.si_trapno = trapno;
72216 #endif
72217@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
72218 return 0;
72219 }
72220
72221- atomic_long_add(1, &mce_bad_pages);
72222+ atomic_long_add_unchecked(1, &mce_bad_pages);
72223
72224 /*
72225 * We need/can do nothing about count=0 pages.
72226diff -urNp linux-2.6.32.48/mm/mempolicy.c linux-2.6.32.48/mm/mempolicy.c
72227--- linux-2.6.32.48/mm/mempolicy.c 2011-11-08 19:02:43.000000000 -0500
72228+++ linux-2.6.32.48/mm/mempolicy.c 2011-11-15 19:59:43.000000000 -0500
72229@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
72230 struct vm_area_struct *next;
72231 int err;
72232
72233+#ifdef CONFIG_PAX_SEGMEXEC
72234+ struct vm_area_struct *vma_m;
72235+#endif
72236+
72237 err = 0;
72238 for (; vma && vma->vm_start < end; vma = next) {
72239 next = vma->vm_next;
72240@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
72241 err = policy_vma(vma, new);
72242 if (err)
72243 break;
72244+
72245+#ifdef CONFIG_PAX_SEGMEXEC
72246+ vma_m = pax_find_mirror_vma(vma);
72247+ if (vma_m) {
72248+ err = policy_vma(vma_m, new);
72249+ if (err)
72250+ break;
72251+ }
72252+#endif
72253+
72254 }
72255 return err;
72256 }
72257@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
72258
72259 if (end < start)
72260 return -EINVAL;
72261+
72262+#ifdef CONFIG_PAX_SEGMEXEC
72263+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72264+ if (end > SEGMEXEC_TASK_SIZE)
72265+ return -EINVAL;
72266+ } else
72267+#endif
72268+
72269+ if (end > TASK_SIZE)
72270+ return -EINVAL;
72271+
72272 if (end == start)
72273 return 0;
72274
72275@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
72276 if (!mm)
72277 return -EINVAL;
72278
72279+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72280+ if (mm != current->mm &&
72281+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72282+ err = -EPERM;
72283+ goto out;
72284+ }
72285+#endif
72286+
72287 /*
72288 * Check if this process has the right to modify the specified
72289 * process. The right exists if the process has administrative
72290@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
72291 rcu_read_lock();
72292 tcred = __task_cred(task);
72293 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72294- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72295- !capable(CAP_SYS_NICE)) {
72296+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72297 rcu_read_unlock();
72298 err = -EPERM;
72299 goto out;
72300@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
72301
72302 if (file) {
72303 seq_printf(m, " file=");
72304- seq_path(m, &file->f_path, "\n\t= ");
72305+ seq_path(m, &file->f_path, "\n\t\\= ");
72306 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
72307 seq_printf(m, " heap");
72308 } else if (vma->vm_start <= mm->start_stack &&
72309diff -urNp linux-2.6.32.48/mm/migrate.c linux-2.6.32.48/mm/migrate.c
72310--- linux-2.6.32.48/mm/migrate.c 2011-11-08 19:02:43.000000000 -0500
72311+++ linux-2.6.32.48/mm/migrate.c 2011-11-15 19:59:43.000000000 -0500
72312@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
72313 unsigned long chunk_start;
72314 int err;
72315
72316+ pax_track_stack();
72317+
72318 task_nodes = cpuset_mems_allowed(task);
72319
72320 err = -ENOMEM;
72321@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
72322 if (!mm)
72323 return -EINVAL;
72324
72325+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72326+ if (mm != current->mm &&
72327+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72328+ err = -EPERM;
72329+ goto out;
72330+ }
72331+#endif
72332+
72333 /*
72334 * Check if this process has the right to modify the specified
72335 * process. The right exists if the process has administrative
72336@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
72337 rcu_read_lock();
72338 tcred = __task_cred(task);
72339 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
72340- cred->uid != tcred->suid && cred->uid != tcred->uid &&
72341- !capable(CAP_SYS_NICE)) {
72342+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
72343 rcu_read_unlock();
72344 err = -EPERM;
72345 goto out;
72346diff -urNp linux-2.6.32.48/mm/mlock.c linux-2.6.32.48/mm/mlock.c
72347--- linux-2.6.32.48/mm/mlock.c 2011-11-08 19:02:43.000000000 -0500
72348+++ linux-2.6.32.48/mm/mlock.c 2011-11-15 19:59:43.000000000 -0500
72349@@ -13,6 +13,7 @@
72350 #include <linux/pagemap.h>
72351 #include <linux/mempolicy.h>
72352 #include <linux/syscalls.h>
72353+#include <linux/security.h>
72354 #include <linux/sched.h>
72355 #include <linux/module.h>
72356 #include <linux/rmap.h>
72357@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
72358 }
72359 }
72360
72361-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
72362-{
72363- return (vma->vm_flags & VM_GROWSDOWN) &&
72364- (vma->vm_start == addr) &&
72365- !vma_stack_continue(vma->vm_prev, addr);
72366-}
72367-
72368 /**
72369 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
72370 * @vma: target vma
72371@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
72372 if (vma->vm_flags & VM_WRITE)
72373 gup_flags |= FOLL_WRITE;
72374
72375- /* We don't try to access the guard page of a stack vma */
72376- if (stack_guard_page(vma, start)) {
72377- addr += PAGE_SIZE;
72378- nr_pages--;
72379- }
72380-
72381 while (nr_pages > 0) {
72382 int i;
72383
72384@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
72385 {
72386 unsigned long nstart, end, tmp;
72387 struct vm_area_struct * vma, * prev;
72388- int error;
72389+ int error = -EINVAL;
72390
72391 len = PAGE_ALIGN(len);
72392 end = start + len;
72393@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
72394 return -EINVAL;
72395 if (end == start)
72396 return 0;
72397+ if (end > TASK_SIZE)
72398+ return -EINVAL;
72399+
72400 vma = find_vma_prev(current->mm, start, &prev);
72401 if (!vma || vma->vm_start > start)
72402 return -ENOMEM;
72403@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
72404 for (nstart = start ; ; ) {
72405 unsigned int newflags;
72406
72407+#ifdef CONFIG_PAX_SEGMEXEC
72408+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72409+ break;
72410+#endif
72411+
72412 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72413
72414 newflags = vma->vm_flags | VM_LOCKED;
72415@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
72416 lock_limit >>= PAGE_SHIFT;
72417
72418 /* check against resource limits */
72419+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72420 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72421 error = do_mlock(start, len, 1);
72422 up_write(&current->mm->mmap_sem);
72423@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
72424 static int do_mlockall(int flags)
72425 {
72426 struct vm_area_struct * vma, * prev = NULL;
72427- unsigned int def_flags = 0;
72428
72429 if (flags & MCL_FUTURE)
72430- def_flags = VM_LOCKED;
72431- current->mm->def_flags = def_flags;
72432+ current->mm->def_flags |= VM_LOCKED;
72433+ else
72434+ current->mm->def_flags &= ~VM_LOCKED;
72435 if (flags == MCL_FUTURE)
72436 goto out;
72437
72438 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72439- unsigned int newflags;
72440+ unsigned long newflags;
72441+
72442+#ifdef CONFIG_PAX_SEGMEXEC
72443+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72444+ break;
72445+#endif
72446
72447+ BUG_ON(vma->vm_end > TASK_SIZE);
72448 newflags = vma->vm_flags | VM_LOCKED;
72449 if (!(flags & MCL_CURRENT))
72450 newflags &= ~VM_LOCKED;
72451@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72452 lock_limit >>= PAGE_SHIFT;
72453
72454 ret = -ENOMEM;
72455+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72456 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72457 capable(CAP_IPC_LOCK))
72458 ret = do_mlockall(flags);
72459diff -urNp linux-2.6.32.48/mm/mmap.c linux-2.6.32.48/mm/mmap.c
72460--- linux-2.6.32.48/mm/mmap.c 2011-11-08 19:02:43.000000000 -0500
72461+++ linux-2.6.32.48/mm/mmap.c 2011-11-15 19:59:43.000000000 -0500
72462@@ -45,6 +45,16 @@
72463 #define arch_rebalance_pgtables(addr, len) (addr)
72464 #endif
72465
72466+static inline void verify_mm_writelocked(struct mm_struct *mm)
72467+{
72468+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72469+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72470+ up_read(&mm->mmap_sem);
72471+ BUG();
72472+ }
72473+#endif
72474+}
72475+
72476 static void unmap_region(struct mm_struct *mm,
72477 struct vm_area_struct *vma, struct vm_area_struct *prev,
72478 unsigned long start, unsigned long end);
72479@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
72480 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72481 *
72482 */
72483-pgprot_t protection_map[16] = {
72484+pgprot_t protection_map[16] __read_only = {
72485 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72486 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72487 };
72488
72489 pgprot_t vm_get_page_prot(unsigned long vm_flags)
72490 {
72491- return __pgprot(pgprot_val(protection_map[vm_flags &
72492+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72493 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72494 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72495+
72496+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72497+ if (!nx_enabled &&
72498+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72499+ (vm_flags & (VM_READ | VM_WRITE)))
72500+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72501+#endif
72502+
72503+ return prot;
72504 }
72505 EXPORT_SYMBOL(vm_get_page_prot);
72506
72507 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72508 int sysctl_overcommit_ratio = 50; /* default is 50% */
72509 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72510+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72511 struct percpu_counter vm_committed_as;
72512
72513 /*
72514@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
72515 struct vm_area_struct *next = vma->vm_next;
72516
72517 might_sleep();
72518+ BUG_ON(vma->vm_mirror);
72519 if (vma->vm_ops && vma->vm_ops->close)
72520 vma->vm_ops->close(vma);
72521 if (vma->vm_file) {
72522@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72523 * not page aligned -Ram Gupta
72524 */
72525 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
72526+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72527 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72528 (mm->end_data - mm->start_data) > rlim)
72529 goto out;
72530@@ -704,6 +726,12 @@ static int
72531 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72532 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72533 {
72534+
72535+#ifdef CONFIG_PAX_SEGMEXEC
72536+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72537+ return 0;
72538+#endif
72539+
72540 if (is_mergeable_vma(vma, file, vm_flags) &&
72541 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
72542 if (vma->vm_pgoff == vm_pgoff)
72543@@ -723,6 +751,12 @@ static int
72544 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72545 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72546 {
72547+
72548+#ifdef CONFIG_PAX_SEGMEXEC
72549+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72550+ return 0;
72551+#endif
72552+
72553 if (is_mergeable_vma(vma, file, vm_flags) &&
72554 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
72555 pgoff_t vm_pglen;
72556@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
72557 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72558 struct vm_area_struct *prev, unsigned long addr,
72559 unsigned long end, unsigned long vm_flags,
72560- struct anon_vma *anon_vma, struct file *file,
72561+ struct anon_vma *anon_vma, struct file *file,
72562 pgoff_t pgoff, struct mempolicy *policy)
72563 {
72564 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72565 struct vm_area_struct *area, *next;
72566
72567+#ifdef CONFIG_PAX_SEGMEXEC
72568+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72569+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72570+
72571+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72572+#endif
72573+
72574 /*
72575 * We later require that vma->vm_flags == vm_flags,
72576 * so this tests vma->vm_flags & VM_SPECIAL, too.
72577@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
72578 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72579 next = next->vm_next;
72580
72581+#ifdef CONFIG_PAX_SEGMEXEC
72582+ if (prev)
72583+ prev_m = pax_find_mirror_vma(prev);
72584+ if (area)
72585+ area_m = pax_find_mirror_vma(area);
72586+ if (next)
72587+ next_m = pax_find_mirror_vma(next);
72588+#endif
72589+
72590 /*
72591 * Can it merge with the predecessor?
72592 */
72593@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
72594 /* cases 1, 6 */
72595 vma_adjust(prev, prev->vm_start,
72596 next->vm_end, prev->vm_pgoff, NULL);
72597- } else /* cases 2, 5, 7 */
72598+
72599+#ifdef CONFIG_PAX_SEGMEXEC
72600+ if (prev_m)
72601+ vma_adjust(prev_m, prev_m->vm_start,
72602+ next_m->vm_end, prev_m->vm_pgoff, NULL);
72603+#endif
72604+
72605+ } else { /* cases 2, 5, 7 */
72606 vma_adjust(prev, prev->vm_start,
72607 end, prev->vm_pgoff, NULL);
72608+
72609+#ifdef CONFIG_PAX_SEGMEXEC
72610+ if (prev_m)
72611+ vma_adjust(prev_m, prev_m->vm_start,
72612+ end_m, prev_m->vm_pgoff, NULL);
72613+#endif
72614+
72615+ }
72616 return prev;
72617 }
72618
72619@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
72620 mpol_equal(policy, vma_policy(next)) &&
72621 can_vma_merge_before(next, vm_flags,
72622 anon_vma, file, pgoff+pglen)) {
72623- if (prev && addr < prev->vm_end) /* case 4 */
72624+ if (prev && addr < prev->vm_end) { /* case 4 */
72625 vma_adjust(prev, prev->vm_start,
72626 addr, prev->vm_pgoff, NULL);
72627- else /* cases 3, 8 */
72628+
72629+#ifdef CONFIG_PAX_SEGMEXEC
72630+ if (prev_m)
72631+ vma_adjust(prev_m, prev_m->vm_start,
72632+ addr_m, prev_m->vm_pgoff, NULL);
72633+#endif
72634+
72635+ } else { /* cases 3, 8 */
72636 vma_adjust(area, addr, next->vm_end,
72637 next->vm_pgoff - pglen, NULL);
72638+
72639+#ifdef CONFIG_PAX_SEGMEXEC
72640+ if (area_m)
72641+ vma_adjust(area_m, addr_m, next_m->vm_end,
72642+ next_m->vm_pgoff - pglen, NULL);
72643+#endif
72644+
72645+ }
72646 return area;
72647 }
72648
72649@@ -898,14 +978,11 @@ none:
72650 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72651 struct file *file, long pages)
72652 {
72653- const unsigned long stack_flags
72654- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72655-
72656 if (file) {
72657 mm->shared_vm += pages;
72658 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72659 mm->exec_vm += pages;
72660- } else if (flags & stack_flags)
72661+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72662 mm->stack_vm += pages;
72663 if (flags & (VM_RESERVED|VM_IO))
72664 mm->reserved_vm += pages;
72665@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
72666 * (the exception is when the underlying filesystem is noexec
72667 * mounted, in which case we dont add PROT_EXEC.)
72668 */
72669- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72670+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72671 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72672 prot |= PROT_EXEC;
72673
72674@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
72675 /* Obtain the address to map to. we verify (or select) it and ensure
72676 * that it represents a valid section of the address space.
72677 */
72678- addr = get_unmapped_area(file, addr, len, pgoff, flags);
72679+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72680 if (addr & ~PAGE_MASK)
72681 return addr;
72682
72683@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
72684 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72685 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72686
72687+#ifdef CONFIG_PAX_MPROTECT
72688+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72689+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72690+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72691+ gr_log_rwxmmap(file);
72692+
72693+#ifdef CONFIG_PAX_EMUPLT
72694+ vm_flags &= ~VM_EXEC;
72695+#else
72696+ return -EPERM;
72697+#endif
72698+
72699+ }
72700+
72701+ if (!(vm_flags & VM_EXEC))
72702+ vm_flags &= ~VM_MAYEXEC;
72703+#else
72704+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72705+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72706+#endif
72707+ else
72708+ vm_flags &= ~VM_MAYWRITE;
72709+ }
72710+#endif
72711+
72712+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72713+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72714+ vm_flags &= ~VM_PAGEEXEC;
72715+#endif
72716+
72717 if (flags & MAP_LOCKED)
72718 if (!can_do_mlock())
72719 return -EPERM;
72720@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
72721 locked += mm->locked_vm;
72722 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
72723 lock_limit >>= PAGE_SHIFT;
72724+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72725 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72726 return -EAGAIN;
72727 }
72728@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
72729 if (error)
72730 return error;
72731
72732+ if (!gr_acl_handle_mmap(file, prot))
72733+ return -EACCES;
72734+
72735 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72736 }
72737 EXPORT_SYMBOL(do_mmap_pgoff);
72738@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
72739 */
72740 int vma_wants_writenotify(struct vm_area_struct *vma)
72741 {
72742- unsigned int vm_flags = vma->vm_flags;
72743+ unsigned long vm_flags = vma->vm_flags;
72744
72745 /* If it was private or non-writable, the write bit is already clear */
72746- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72747+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72748 return 0;
72749
72750 /* The backer wishes to know when pages are first written to? */
72751@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
72752 unsigned long charged = 0;
72753 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72754
72755+#ifdef CONFIG_PAX_SEGMEXEC
72756+ struct vm_area_struct *vma_m = NULL;
72757+#endif
72758+
72759+ /*
72760+ * mm->mmap_sem is required to protect against another thread
72761+ * changing the mappings in case we sleep.
72762+ */
72763+ verify_mm_writelocked(mm);
72764+
72765 /* Clear old maps */
72766 error = -ENOMEM;
72767-munmap_back:
72768 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72769 if (vma && vma->vm_start < addr + len) {
72770 if (do_munmap(mm, addr, len))
72771 return -ENOMEM;
72772- goto munmap_back;
72773+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72774+ BUG_ON(vma && vma->vm_start < addr + len);
72775 }
72776
72777 /* Check against address space limit. */
72778@@ -1173,6 +1294,16 @@ munmap_back:
72779 goto unacct_error;
72780 }
72781
72782+#ifdef CONFIG_PAX_SEGMEXEC
72783+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72784+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72785+ if (!vma_m) {
72786+ error = -ENOMEM;
72787+ goto free_vma;
72788+ }
72789+ }
72790+#endif
72791+
72792 vma->vm_mm = mm;
72793 vma->vm_start = addr;
72794 vma->vm_end = addr + len;
72795@@ -1195,6 +1326,19 @@ munmap_back:
72796 error = file->f_op->mmap(file, vma);
72797 if (error)
72798 goto unmap_and_free_vma;
72799+
72800+#ifdef CONFIG_PAX_SEGMEXEC
72801+ if (vma_m && (vm_flags & VM_EXECUTABLE))
72802+ added_exe_file_vma(mm);
72803+#endif
72804+
72805+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72806+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72807+ vma->vm_flags |= VM_PAGEEXEC;
72808+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72809+ }
72810+#endif
72811+
72812 if (vm_flags & VM_EXECUTABLE)
72813 added_exe_file_vma(mm);
72814
72815@@ -1218,6 +1362,11 @@ munmap_back:
72816 vma_link(mm, vma, prev, rb_link, rb_parent);
72817 file = vma->vm_file;
72818
72819+#ifdef CONFIG_PAX_SEGMEXEC
72820+ if (vma_m)
72821+ pax_mirror_vma(vma_m, vma);
72822+#endif
72823+
72824 /* Once vma denies write, undo our temporary denial count */
72825 if (correct_wcount)
72826 atomic_inc(&inode->i_writecount);
72827@@ -1226,6 +1375,7 @@ out:
72828
72829 mm->total_vm += len >> PAGE_SHIFT;
72830 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72831+ track_exec_limit(mm, addr, addr + len, vm_flags);
72832 if (vm_flags & VM_LOCKED) {
72833 /*
72834 * makes pages present; downgrades, drops, reacquires mmap_sem
72835@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
72836 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72837 charged = 0;
72838 free_vma:
72839+
72840+#ifdef CONFIG_PAX_SEGMEXEC
72841+ if (vma_m)
72842+ kmem_cache_free(vm_area_cachep, vma_m);
72843+#endif
72844+
72845 kmem_cache_free(vm_area_cachep, vma);
72846 unacct_error:
72847 if (charged)
72848@@ -1255,6 +1411,44 @@ unacct_error:
72849 return error;
72850 }
72851
72852+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72853+{
72854+ if (!vma) {
72855+#ifdef CONFIG_STACK_GROWSUP
72856+ if (addr > sysctl_heap_stack_gap)
72857+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72858+ else
72859+ vma = find_vma(current->mm, 0);
72860+ if (vma && (vma->vm_flags & VM_GROWSUP))
72861+ return false;
72862+#endif
72863+ return true;
72864+ }
72865+
72866+ if (addr + len > vma->vm_start)
72867+ return false;
72868+
72869+ if (vma->vm_flags & VM_GROWSDOWN)
72870+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72871+#ifdef CONFIG_STACK_GROWSUP
72872+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72873+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72874+#endif
72875+
72876+ return true;
72877+}
72878+
72879+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72880+{
72881+ if (vma->vm_start < len)
72882+ return -ENOMEM;
72883+ if (!(vma->vm_flags & VM_GROWSDOWN))
72884+ return vma->vm_start - len;
72885+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
72886+ return vma->vm_start - len - sysctl_heap_stack_gap;
72887+ return -ENOMEM;
72888+}
72889+
72890 /* Get an address range which is currently unmapped.
72891 * For shmat() with addr=0.
72892 *
72893@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
72894 if (flags & MAP_FIXED)
72895 return addr;
72896
72897+#ifdef CONFIG_PAX_RANDMMAP
72898+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72899+#endif
72900+
72901 if (addr) {
72902 addr = PAGE_ALIGN(addr);
72903- vma = find_vma(mm, addr);
72904- if (TASK_SIZE - len >= addr &&
72905- (!vma || addr + len <= vma->vm_start))
72906- return addr;
72907+ if (TASK_SIZE - len >= addr) {
72908+ vma = find_vma(mm, addr);
72909+ if (check_heap_stack_gap(vma, addr, len))
72910+ return addr;
72911+ }
72912 }
72913 if (len > mm->cached_hole_size) {
72914- start_addr = addr = mm->free_area_cache;
72915+ start_addr = addr = mm->free_area_cache;
72916 } else {
72917- start_addr = addr = TASK_UNMAPPED_BASE;
72918- mm->cached_hole_size = 0;
72919+ start_addr = addr = mm->mmap_base;
72920+ mm->cached_hole_size = 0;
72921 }
72922
72923 full_search:
72924@@ -1303,34 +1502,40 @@ full_search:
72925 * Start a new search - just in case we missed
72926 * some holes.
72927 */
72928- if (start_addr != TASK_UNMAPPED_BASE) {
72929- addr = TASK_UNMAPPED_BASE;
72930- start_addr = addr;
72931+ if (start_addr != mm->mmap_base) {
72932+ start_addr = addr = mm->mmap_base;
72933 mm->cached_hole_size = 0;
72934 goto full_search;
72935 }
72936 return -ENOMEM;
72937 }
72938- if (!vma || addr + len <= vma->vm_start) {
72939- /*
72940- * Remember the place where we stopped the search:
72941- */
72942- mm->free_area_cache = addr + len;
72943- return addr;
72944- }
72945+ if (check_heap_stack_gap(vma, addr, len))
72946+ break;
72947 if (addr + mm->cached_hole_size < vma->vm_start)
72948 mm->cached_hole_size = vma->vm_start - addr;
72949 addr = vma->vm_end;
72950 }
72951+
72952+ /*
72953+ * Remember the place where we stopped the search:
72954+ */
72955+ mm->free_area_cache = addr + len;
72956+ return addr;
72957 }
72958 #endif
72959
72960 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
72961 {
72962+
72963+#ifdef CONFIG_PAX_SEGMEXEC
72964+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72965+ return;
72966+#endif
72967+
72968 /*
72969 * Is this a new hole at the lowest possible address?
72970 */
72971- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
72972+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
72973 mm->free_area_cache = addr;
72974 mm->cached_hole_size = ~0UL;
72975 }
72976@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
72977 {
72978 struct vm_area_struct *vma;
72979 struct mm_struct *mm = current->mm;
72980- unsigned long addr = addr0;
72981+ unsigned long base = mm->mmap_base, addr = addr0;
72982
72983 /* requested length too big for entire address space */
72984 if (len > TASK_SIZE)
72985@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
72986 if (flags & MAP_FIXED)
72987 return addr;
72988
72989+#ifdef CONFIG_PAX_RANDMMAP
72990+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72991+#endif
72992+
72993 /* requesting a specific address */
72994 if (addr) {
72995 addr = PAGE_ALIGN(addr);
72996- vma = find_vma(mm, addr);
72997- if (TASK_SIZE - len >= addr &&
72998- (!vma || addr + len <= vma->vm_start))
72999- return addr;
73000+ if (TASK_SIZE - len >= addr) {
73001+ vma = find_vma(mm, addr);
73002+ if (check_heap_stack_gap(vma, addr, len))
73003+ return addr;
73004+ }
73005 }
73006
73007 /* check if free_area_cache is useful for us */
73008@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
73009 /* make sure it can fit in the remaining address space */
73010 if (addr > len) {
73011 vma = find_vma(mm, addr-len);
73012- if (!vma || addr <= vma->vm_start)
73013+ if (check_heap_stack_gap(vma, addr - len, len))
73014 /* remember the address as a hint for next time */
73015 return (mm->free_area_cache = addr-len);
73016 }
73017@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
73018 * return with success:
73019 */
73020 vma = find_vma(mm, addr);
73021- if (!vma || addr+len <= vma->vm_start)
73022+ if (check_heap_stack_gap(vma, addr, len))
73023 /* remember the address as a hint for next time */
73024 return (mm->free_area_cache = addr);
73025
73026@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
73027 mm->cached_hole_size = vma->vm_start - addr;
73028
73029 /* try just below the current vma->vm_start */
73030- addr = vma->vm_start-len;
73031- } while (len < vma->vm_start);
73032+ addr = skip_heap_stack_gap(vma, len);
73033+ } while (!IS_ERR_VALUE(addr));
73034
73035 bottomup:
73036 /*
73037@@ -1414,13 +1624,21 @@ bottomup:
73038 * can happen with large stack limits and large mmap()
73039 * allocations.
73040 */
73041+ mm->mmap_base = TASK_UNMAPPED_BASE;
73042+
73043+#ifdef CONFIG_PAX_RANDMMAP
73044+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73045+ mm->mmap_base += mm->delta_mmap;
73046+#endif
73047+
73048+ mm->free_area_cache = mm->mmap_base;
73049 mm->cached_hole_size = ~0UL;
73050- mm->free_area_cache = TASK_UNMAPPED_BASE;
73051 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
73052 /*
73053 * Restore the topdown base:
73054 */
73055- mm->free_area_cache = mm->mmap_base;
73056+ mm->mmap_base = base;
73057+ mm->free_area_cache = base;
73058 mm->cached_hole_size = ~0UL;
73059
73060 return addr;
73061@@ -1429,6 +1647,12 @@ bottomup:
73062
73063 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73064 {
73065+
73066+#ifdef CONFIG_PAX_SEGMEXEC
73067+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73068+ return;
73069+#endif
73070+
73071 /*
73072 * Is this a new hole at the highest possible address?
73073 */
73074@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
73075 mm->free_area_cache = addr;
73076
73077 /* dont allow allocations above current base */
73078- if (mm->free_area_cache > mm->mmap_base)
73079+ if (mm->free_area_cache > mm->mmap_base) {
73080 mm->free_area_cache = mm->mmap_base;
73081+ mm->cached_hole_size = ~0UL;
73082+ }
73083 }
73084
73085 unsigned long
73086@@ -1545,6 +1771,27 @@ out:
73087 return prev ? prev->vm_next : vma;
73088 }
73089
73090+#ifdef CONFIG_PAX_SEGMEXEC
73091+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
73092+{
73093+ struct vm_area_struct *vma_m;
73094+
73095+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
73096+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
73097+ BUG_ON(vma->vm_mirror);
73098+ return NULL;
73099+ }
73100+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
73101+ vma_m = vma->vm_mirror;
73102+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
73103+ BUG_ON(vma->vm_file != vma_m->vm_file);
73104+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
73105+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
73106+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
73107+ return vma_m;
73108+}
73109+#endif
73110+
73111 /*
73112 * Verify that the stack growth is acceptable and
73113 * update accounting. This is shared with both the
73114@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
73115 return -ENOMEM;
73116
73117 /* Stack limit test */
73118+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
73119 if (size > rlim[RLIMIT_STACK].rlim_cur)
73120 return -ENOMEM;
73121
73122@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
73123 unsigned long limit;
73124 locked = mm->locked_vm + grow;
73125 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
73126+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
73127 if (locked > limit && !capable(CAP_IPC_LOCK))
73128 return -ENOMEM;
73129 }
73130@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
73131 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
73132 * vma is the last one with address > vma->vm_end. Have to extend vma.
73133 */
73134+#ifndef CONFIG_IA64
73135+static
73136+#endif
73137 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73138 {
73139 int error;
73140+ bool locknext;
73141
73142 if (!(vma->vm_flags & VM_GROWSUP))
73143 return -EFAULT;
73144
73145+ /* Also guard against wrapping around to address 0. */
73146+ if (address < PAGE_ALIGN(address+1))
73147+ address = PAGE_ALIGN(address+1);
73148+ else
73149+ return -ENOMEM;
73150+
73151 /*
73152 * We must make sure the anon_vma is allocated
73153 * so that the anon_vma locking is not a noop.
73154 */
73155 if (unlikely(anon_vma_prepare(vma)))
73156 return -ENOMEM;
73157+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
73158+ if (locknext && anon_vma_prepare(vma->vm_next))
73159+ return -ENOMEM;
73160 anon_vma_lock(vma);
73161+ if (locknext)
73162+ anon_vma_lock(vma->vm_next);
73163
73164 /*
73165 * vma->vm_start/vm_end cannot change under us because the caller
73166 * is required to hold the mmap_sem in read mode. We need the
73167- * anon_vma lock to serialize against concurrent expand_stacks.
73168- * Also guard against wrapping around to address 0.
73169+ * anon_vma locks to serialize against concurrent expand_stacks
73170+ * and expand_upwards.
73171 */
73172- if (address < PAGE_ALIGN(address+4))
73173- address = PAGE_ALIGN(address+4);
73174- else {
73175- anon_vma_unlock(vma);
73176- return -ENOMEM;
73177- }
73178 error = 0;
73179
73180 /* Somebody else might have raced and expanded it already */
73181- if (address > vma->vm_end) {
73182+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
73183+ error = -ENOMEM;
73184+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
73185 unsigned long size, grow;
73186
73187 size = address - vma->vm_start;
73188@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct
73189 vma->vm_end = address;
73190 }
73191 }
73192+ if (locknext)
73193+ anon_vma_unlock(vma->vm_next);
73194 anon_vma_unlock(vma);
73195 return error;
73196 }
73197@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_ar
73198 unsigned long address)
73199 {
73200 int error;
73201+ bool lockprev = false;
73202+ struct vm_area_struct *prev;
73203
73204 /*
73205 * We must make sure the anon_vma is allocated
73206@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_ar
73207 if (error)
73208 return error;
73209
73210+ prev = vma->vm_prev;
73211+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
73212+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73213+#endif
73214+ if (lockprev && anon_vma_prepare(prev))
73215+ return -ENOMEM;
73216+ if (lockprev)
73217+ anon_vma_lock(prev);
73218+
73219 anon_vma_lock(vma);
73220
73221 /*
73222@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_ar
73223 */
73224
73225 /* Somebody else might have raced and expanded it already */
73226- if (address < vma->vm_start) {
73227+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73228+ error = -ENOMEM;
73229+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73230 unsigned long size, grow;
73231
73232+#ifdef CONFIG_PAX_SEGMEXEC
73233+ struct vm_area_struct *vma_m;
73234+
73235+ vma_m = pax_find_mirror_vma(vma);
73236+#endif
73237+
73238 size = vma->vm_end - address;
73239 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73240
73241@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_ar
73242 if (!error) {
73243 vma->vm_start = address;
73244 vma->vm_pgoff -= grow;
73245+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73246+
73247+#ifdef CONFIG_PAX_SEGMEXEC
73248+ if (vma_m) {
73249+ vma_m->vm_start -= grow << PAGE_SHIFT;
73250+ vma_m->vm_pgoff -= grow;
73251+ }
73252+#endif
73253+
73254+
73255 }
73256 }
73257 }
73258 anon_vma_unlock(vma);
73259+ if (lockprev)
73260+ anon_vma_unlock(prev);
73261 return error;
73262 }
73263
73264@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_st
73265 do {
73266 long nrpages = vma_pages(vma);
73267
73268+#ifdef CONFIG_PAX_SEGMEXEC
73269+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73270+ vma = remove_vma(vma);
73271+ continue;
73272+ }
73273+#endif
73274+
73275 mm->total_vm -= nrpages;
73276 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
73277 vma = remove_vma(vma);
73278@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_str
73279 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
73280 vma->vm_prev = NULL;
73281 do {
73282+
73283+#ifdef CONFIG_PAX_SEGMEXEC
73284+ if (vma->vm_mirror) {
73285+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73286+ vma->vm_mirror->vm_mirror = NULL;
73287+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
73288+ vma->vm_mirror = NULL;
73289+ }
73290+#endif
73291+
73292 rb_erase(&vma->vm_rb, &mm->mm_rb);
73293 mm->map_count--;
73294 tail_vma = vma;
73295@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, str
73296 struct mempolicy *pol;
73297 struct vm_area_struct *new;
73298
73299+#ifdef CONFIG_PAX_SEGMEXEC
73300+ struct vm_area_struct *vma_m, *new_m = NULL;
73301+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
73302+#endif
73303+
73304 if (is_vm_hugetlb_page(vma) && (addr &
73305 ~(huge_page_mask(hstate_vma(vma)))))
73306 return -EINVAL;
73307
73308+#ifdef CONFIG_PAX_SEGMEXEC
73309+ vma_m = pax_find_mirror_vma(vma);
73310+
73311+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73312+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73313+ if (mm->map_count >= sysctl_max_map_count-1)
73314+ return -ENOMEM;
73315+ } else
73316+#endif
73317+
73318 if (mm->map_count >= sysctl_max_map_count)
73319 return -ENOMEM;
73320
73321@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, str
73322 if (!new)
73323 return -ENOMEM;
73324
73325+#ifdef CONFIG_PAX_SEGMEXEC
73326+ if (vma_m) {
73327+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73328+ if (!new_m) {
73329+ kmem_cache_free(vm_area_cachep, new);
73330+ return -ENOMEM;
73331+ }
73332+ }
73333+#endif
73334+
73335 /* most fields are the same, copy all, and then fixup */
73336 *new = *vma;
73337
73338@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, str
73339 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73340 }
73341
73342+#ifdef CONFIG_PAX_SEGMEXEC
73343+ if (vma_m) {
73344+ *new_m = *vma_m;
73345+ new_m->vm_mirror = new;
73346+ new->vm_mirror = new_m;
73347+
73348+ if (new_below)
73349+ new_m->vm_end = addr_m;
73350+ else {
73351+ new_m->vm_start = addr_m;
73352+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73353+ }
73354+ }
73355+#endif
73356+
73357 pol = mpol_dup(vma_policy(vma));
73358 if (IS_ERR(pol)) {
73359+
73360+#ifdef CONFIG_PAX_SEGMEXEC
73361+ if (new_m)
73362+ kmem_cache_free(vm_area_cachep, new_m);
73363+#endif
73364+
73365 kmem_cache_free(vm_area_cachep, new);
73366 return PTR_ERR(pol);
73367 }
73368@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, str
73369 else
73370 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73371
73372+#ifdef CONFIG_PAX_SEGMEXEC
73373+ if (vma_m) {
73374+ mpol_get(pol);
73375+ vma_set_policy(new_m, pol);
73376+
73377+ if (new_m->vm_file) {
73378+ get_file(new_m->vm_file);
73379+ if (vma_m->vm_flags & VM_EXECUTABLE)
73380+ added_exe_file_vma(mm);
73381+ }
73382+
73383+ if (new_m->vm_ops && new_m->vm_ops->open)
73384+ new_m->vm_ops->open(new_m);
73385+
73386+ if (new_below)
73387+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73388+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73389+ else
73390+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73391+ }
73392+#endif
73393+
73394 return 0;
73395 }
73396
73397@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, str
73398 * work. This now handles partial unmappings.
73399 * Jeremy Fitzhardinge <jeremy@goop.org>
73400 */
73401+#ifdef CONFIG_PAX_SEGMEXEC
73402+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73403+{
73404+ int ret = __do_munmap(mm, start, len);
73405+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73406+ return ret;
73407+
73408+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73409+}
73410+
73411+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73412+#else
73413 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73414+#endif
73415 {
73416 unsigned long end;
73417 struct vm_area_struct *vma, *prev, *last;
73418
73419+ /*
73420+ * mm->mmap_sem is required to protect against another thread
73421+ * changing the mappings in case we sleep.
73422+ */
73423+ verify_mm_writelocked(mm);
73424+
73425 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73426 return -EINVAL;
73427
73428@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsi
73429 /* Fix up all other VM information */
73430 remove_vma_list(mm, vma);
73431
73432+ track_exec_limit(mm, start, end, 0UL);
73433+
73434 return 0;
73435 }
73436
73437@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
73438
73439 profile_munmap(addr);
73440
73441+#ifdef CONFIG_PAX_SEGMEXEC
73442+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73443+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
73444+ return -EINVAL;
73445+#endif
73446+
73447 down_write(&mm->mmap_sem);
73448 ret = do_munmap(mm, addr, len);
73449 up_write(&mm->mmap_sem);
73450 return ret;
73451 }
73452
73453-static inline void verify_mm_writelocked(struct mm_struct *mm)
73454-{
73455-#ifdef CONFIG_DEBUG_VM
73456- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73457- WARN_ON(1);
73458- up_read(&mm->mmap_sem);
73459- }
73460-#endif
73461-}
73462-
73463 /*
73464 * this is really a simplified "do_mmap". it only handles
73465 * anonymous maps. eventually we may be able to do some
73466@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr,
73467 struct rb_node ** rb_link, * rb_parent;
73468 pgoff_t pgoff = addr >> PAGE_SHIFT;
73469 int error;
73470+ unsigned long charged;
73471
73472 len = PAGE_ALIGN(len);
73473 if (!len)
73474@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr,
73475
73476 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73477
73478+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73479+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73480+ flags &= ~VM_EXEC;
73481+
73482+#ifdef CONFIG_PAX_MPROTECT
73483+ if (mm->pax_flags & MF_PAX_MPROTECT)
73484+ flags &= ~VM_MAYEXEC;
73485+#endif
73486+
73487+ }
73488+#endif
73489+
73490 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73491 if (error & ~PAGE_MASK)
73492 return error;
73493
73494+ charged = len >> PAGE_SHIFT;
73495+
73496 /*
73497 * mlock MCL_FUTURE?
73498 */
73499 if (mm->def_flags & VM_LOCKED) {
73500 unsigned long locked, lock_limit;
73501- locked = len >> PAGE_SHIFT;
73502+ locked = charged;
73503 locked += mm->locked_vm;
73504 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
73505 lock_limit >>= PAGE_SHIFT;
73506@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr,
73507 /*
73508 * Clear old maps. this also does some error checking for us
73509 */
73510- munmap_back:
73511 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73512 if (vma && vma->vm_start < addr + len) {
73513 if (do_munmap(mm, addr, len))
73514 return -ENOMEM;
73515- goto munmap_back;
73516+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73517+ BUG_ON(vma && vma->vm_start < addr + len);
73518 }
73519
73520 /* Check against address space limits *after* clearing old maps... */
73521- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73522+ if (!may_expand_vm(mm, charged))
73523 return -ENOMEM;
73524
73525 if (mm->map_count > sysctl_max_map_count)
73526 return -ENOMEM;
73527
73528- if (security_vm_enough_memory(len >> PAGE_SHIFT))
73529+ if (security_vm_enough_memory(charged))
73530 return -ENOMEM;
73531
73532 /* Can we just expand an old private anonymous mapping? */
73533@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr,
73534 */
73535 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73536 if (!vma) {
73537- vm_unacct_memory(len >> PAGE_SHIFT);
73538+ vm_unacct_memory(charged);
73539 return -ENOMEM;
73540 }
73541
73542@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr,
73543 vma->vm_page_prot = vm_get_page_prot(flags);
73544 vma_link(mm, vma, prev, rb_link, rb_parent);
73545 out:
73546- mm->total_vm += len >> PAGE_SHIFT;
73547+ mm->total_vm += charged;
73548 if (flags & VM_LOCKED) {
73549 if (!mlock_vma_pages_range(vma, addr, addr + len))
73550- mm->locked_vm += (len >> PAGE_SHIFT);
73551+ mm->locked_vm += charged;
73552 }
73553+ track_exec_limit(mm, addr, addr + len, flags);
73554 return addr;
73555 }
73556
73557@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
73558 * Walk the list again, actually closing and freeing it,
73559 * with preemption enabled, without holding any MM locks.
73560 */
73561- while (vma)
73562+ while (vma) {
73563+ vma->vm_mirror = NULL;
73564 vma = remove_vma(vma);
73565+ }
73566
73567 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
73568 }
73569@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct *
73570 struct vm_area_struct * __vma, * prev;
73571 struct rb_node ** rb_link, * rb_parent;
73572
73573+#ifdef CONFIG_PAX_SEGMEXEC
73574+ struct vm_area_struct *vma_m = NULL;
73575+#endif
73576+
73577 /*
73578 * The vm_pgoff of a purely anonymous vma should be irrelevant
73579 * until its first write fault, when page's anon_vma and index
73580@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct *
73581 if ((vma->vm_flags & VM_ACCOUNT) &&
73582 security_vm_enough_memory_mm(mm, vma_pages(vma)))
73583 return -ENOMEM;
73584+
73585+#ifdef CONFIG_PAX_SEGMEXEC
73586+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73587+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73588+ if (!vma_m)
73589+ return -ENOMEM;
73590+ }
73591+#endif
73592+
73593 vma_link(mm, vma, prev, rb_link, rb_parent);
73594+
73595+#ifdef CONFIG_PAX_SEGMEXEC
73596+ if (vma_m)
73597+ pax_mirror_vma(vma_m, vma);
73598+#endif
73599+
73600 return 0;
73601 }
73602
73603@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct v
73604 struct rb_node **rb_link, *rb_parent;
73605 struct mempolicy *pol;
73606
73607+ BUG_ON(vma->vm_mirror);
73608+
73609 /*
73610 * If anonymous vma has not yet been faulted, update new pgoff
73611 * to match new location, to increase its chance of merging.
73612@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct v
73613 return new_vma;
73614 }
73615
73616+#ifdef CONFIG_PAX_SEGMEXEC
73617+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73618+{
73619+ struct vm_area_struct *prev_m;
73620+ struct rb_node **rb_link_m, *rb_parent_m;
73621+ struct mempolicy *pol_m;
73622+
73623+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73624+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73625+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73626+ *vma_m = *vma;
73627+ pol_m = vma_policy(vma_m);
73628+ mpol_get(pol_m);
73629+ vma_set_policy(vma_m, pol_m);
73630+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73631+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73632+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73633+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73634+ if (vma_m->vm_file)
73635+ get_file(vma_m->vm_file);
73636+ if (vma_m->vm_ops && vma_m->vm_ops->open)
73637+ vma_m->vm_ops->open(vma_m);
73638+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73639+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73640+ vma_m->vm_mirror = vma;
73641+ vma->vm_mirror = vma_m;
73642+}
73643+#endif
73644+
73645 /*
73646 * Return true if the calling process may expand its vm space by the passed
73647 * number of pages
73648@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm,
73649 unsigned long lim;
73650
73651 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
73652-
73653+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73654 if (cur + npages > lim)
73655 return 0;
73656 return 1;
73657@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_st
73658 vma->vm_start = addr;
73659 vma->vm_end = addr + len;
73660
73661+#ifdef CONFIG_PAX_MPROTECT
73662+ if (mm->pax_flags & MF_PAX_MPROTECT) {
73663+#ifndef CONFIG_PAX_MPROTECT_COMPAT
73664+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73665+ return -EPERM;
73666+ if (!(vm_flags & VM_EXEC))
73667+ vm_flags &= ~VM_MAYEXEC;
73668+#else
73669+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73670+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73671+#endif
73672+ else
73673+ vm_flags &= ~VM_MAYWRITE;
73674+ }
73675+#endif
73676+
73677 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73678 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73679
73680diff -urNp linux-2.6.32.48/mm/mprotect.c linux-2.6.32.48/mm/mprotect.c
73681--- linux-2.6.32.48/mm/mprotect.c 2011-11-08 19:02:43.000000000 -0500
73682+++ linux-2.6.32.48/mm/mprotect.c 2011-11-15 19:59:43.000000000 -0500
73683@@ -24,10 +24,16 @@
73684 #include <linux/mmu_notifier.h>
73685 #include <linux/migrate.h>
73686 #include <linux/perf_event.h>
73687+
73688+#ifdef CONFIG_PAX_MPROTECT
73689+#include <linux/elf.h>
73690+#endif
73691+
73692 #include <asm/uaccess.h>
73693 #include <asm/pgtable.h>
73694 #include <asm/cacheflush.h>
73695 #include <asm/tlbflush.h>
73696+#include <asm/mmu_context.h>
73697
73698 #ifndef pgprot_modify
73699 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
73700@@ -132,6 +138,48 @@ static void change_protection(struct vm_
73701 flush_tlb_range(vma, start, end);
73702 }
73703
73704+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72705+/* called while holding the mmap semaphore for writing, except during stack expansion */
73706+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73707+{
73708+ unsigned long oldlimit, newlimit = 0UL;
73709+
73710+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
73711+ return;
73712+
73713+ spin_lock(&mm->page_table_lock);
73714+ oldlimit = mm->context.user_cs_limit;
73715+ if ((prot & VM_EXEC) && oldlimit < end)
73716+ /* USER_CS limit moved up */
73717+ newlimit = end;
73718+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73719+ /* USER_CS limit moved down */
73720+ newlimit = start;
73721+
73722+ if (newlimit) {
73723+ mm->context.user_cs_limit = newlimit;
73724+
73725+#ifdef CONFIG_SMP
73726+ wmb();
73727+ cpus_clear(mm->context.cpu_user_cs_mask);
73728+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73729+#endif
73730+
73731+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73732+ }
73733+ spin_unlock(&mm->page_table_lock);
73734+ if (newlimit == end) {
73735+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
73736+
73737+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
73738+ if (is_vm_hugetlb_page(vma))
73739+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73740+ else
73741+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73742+ }
73743+}
73744+#endif
73745+
73746 int
73747 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73748 unsigned long start, unsigned long end, unsigned long newflags)
73749@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
73750 int error;
73751 int dirty_accountable = 0;
73752
73753+#ifdef CONFIG_PAX_SEGMEXEC
73754+ struct vm_area_struct *vma_m = NULL;
73755+ unsigned long start_m, end_m;
73756+
73757+ start_m = start + SEGMEXEC_TASK_SIZE;
73758+ end_m = end + SEGMEXEC_TASK_SIZE;
73759+#endif
73760+
73761 if (newflags == oldflags) {
73762 *pprev = vma;
73763 return 0;
73764 }
73765
73766+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73767+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73768+
73769+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73770+ return -ENOMEM;
73771+
73772+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73773+ return -ENOMEM;
73774+ }
73775+
73776 /*
73777 * If we make a private mapping writable we increase our commit;
73778 * but (without finer accounting) cannot reduce our commit if we
73779@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
73780 }
73781 }
73782
73783+#ifdef CONFIG_PAX_SEGMEXEC
73784+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73785+ if (start != vma->vm_start) {
73786+ error = split_vma(mm, vma, start, 1);
73787+ if (error)
73788+ goto fail;
73789+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73790+ *pprev = (*pprev)->vm_next;
73791+ }
73792+
73793+ if (end != vma->vm_end) {
73794+ error = split_vma(mm, vma, end, 0);
73795+ if (error)
73796+ goto fail;
73797+ }
73798+
73799+ if (pax_find_mirror_vma(vma)) {
73800+ error = __do_munmap(mm, start_m, end_m - start_m);
73801+ if (error)
73802+ goto fail;
73803+ } else {
73804+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73805+ if (!vma_m) {
73806+ error = -ENOMEM;
73807+ goto fail;
73808+ }
73809+ vma->vm_flags = newflags;
73810+ pax_mirror_vma(vma_m, vma);
73811+ }
73812+ }
73813+#endif
73814+
73815 /*
73816 * First try to merge with previous and/or next vma.
73817 */
73818@@ -195,9 +293,21 @@ success:
73819 * vm_flags and vm_page_prot are protected by the mmap_sem
73820 * held in write mode.
73821 */
73822+
73823+#ifdef CONFIG_PAX_SEGMEXEC
73824+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73825+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73826+#endif
73827+
73828 vma->vm_flags = newflags;
73829+
73830+#ifdef CONFIG_PAX_MPROTECT
73831+ if (mm->binfmt && mm->binfmt->handle_mprotect)
73832+ mm->binfmt->handle_mprotect(vma, newflags);
73833+#endif
73834+
73835 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73836- vm_get_page_prot(newflags));
73837+ vm_get_page_prot(vma->vm_flags));
73838
73839 if (vma_wants_writenotify(vma)) {
73840 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
73841@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
73842 end = start + len;
73843 if (end <= start)
73844 return -ENOMEM;
73845+
73846+#ifdef CONFIG_PAX_SEGMEXEC
73847+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73848+ if (end > SEGMEXEC_TASK_SIZE)
73849+ return -EINVAL;
73850+ } else
73851+#endif
73852+
73853+ if (end > TASK_SIZE)
73854+ return -EINVAL;
73855+
73856 if (!arch_validate_prot(prot))
73857 return -EINVAL;
73858
73859@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
73860 /*
73861 * Does the application expect PROT_READ to imply PROT_EXEC:
73862 */
73863- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73864+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73865 prot |= PROT_EXEC;
73866
73867 vm_flags = calc_vm_prot_bits(prot);
73868@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
73869 if (start > vma->vm_start)
73870 prev = vma;
73871
73872+#ifdef CONFIG_PAX_MPROTECT
73873+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73874+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
73875+#endif
73876+
73877 for (nstart = start ; ; ) {
73878 unsigned long newflags;
73879
73880@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
73881
73882 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73883 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73884+ if (prot & (PROT_WRITE | PROT_EXEC))
73885+ gr_log_rwxmprotect(vma->vm_file);
73886+
73887+ error = -EACCES;
73888+ goto out;
73889+ }
73890+
73891+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73892 error = -EACCES;
73893 goto out;
73894 }
73895@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
73896 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
73897 if (error)
73898 goto out;
73899+
73900+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
73901+
73902 nstart = tmp;
73903
73904 if (nstart < prev->vm_end)
73905diff -urNp linux-2.6.32.48/mm/mremap.c linux-2.6.32.48/mm/mremap.c
73906--- linux-2.6.32.48/mm/mremap.c 2011-11-08 19:02:43.000000000 -0500
73907+++ linux-2.6.32.48/mm/mremap.c 2011-11-15 19:59:43.000000000 -0500
73908@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
73909 continue;
73910 pte = ptep_clear_flush(vma, old_addr, old_pte);
73911 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
73912+
73913+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73914+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
73915+ pte = pte_exprotect(pte);
73916+#endif
73917+
73918 set_pte_at(mm, new_addr, new_pte, pte);
73919 }
73920
73921@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
73922 if (is_vm_hugetlb_page(vma))
73923 goto Einval;
73924
73925+#ifdef CONFIG_PAX_SEGMEXEC
73926+ if (pax_find_mirror_vma(vma))
73927+ goto Einval;
73928+#endif
73929+
73930 /* We can't remap across vm area boundaries */
73931 if (old_len > vma->vm_end - addr)
73932 goto Efault;
73933@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
73934 unsigned long ret = -EINVAL;
73935 unsigned long charged = 0;
73936 unsigned long map_flags;
73937+ unsigned long pax_task_size = TASK_SIZE;
73938
73939 if (new_addr & ~PAGE_MASK)
73940 goto out;
73941
73942- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
73943+#ifdef CONFIG_PAX_SEGMEXEC
73944+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73945+ pax_task_size = SEGMEXEC_TASK_SIZE;
73946+#endif
73947+
73948+ pax_task_size -= PAGE_SIZE;
73949+
73950+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
73951 goto out;
73952
73953 /* Check if the location we're moving into overlaps the
73954 * old location at all, and fail if it does.
73955 */
73956- if ((new_addr <= addr) && (new_addr+new_len) > addr)
73957- goto out;
73958-
73959- if ((addr <= new_addr) && (addr+old_len) > new_addr)
73960+ if (addr + old_len > new_addr && new_addr + new_len > addr)
73961 goto out;
73962
73963 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73964@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
73965 struct vm_area_struct *vma;
73966 unsigned long ret = -EINVAL;
73967 unsigned long charged = 0;
73968+ unsigned long pax_task_size = TASK_SIZE;
73969
73970 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
73971 goto out;
73972@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
73973 if (!new_len)
73974 goto out;
73975
73976+#ifdef CONFIG_PAX_SEGMEXEC
73977+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
73978+ pax_task_size = SEGMEXEC_TASK_SIZE;
73979+#endif
73980+
73981+ pax_task_size -= PAGE_SIZE;
73982+
73983+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
73984+ old_len > pax_task_size || addr > pax_task_size-old_len)
73985+ goto out;
73986+
73987 if (flags & MREMAP_FIXED) {
73988 if (flags & MREMAP_MAYMOVE)
73989 ret = mremap_to(addr, old_len, new_addr, new_len);
73990@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
73991 addr + new_len);
73992 }
73993 ret = addr;
73994+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73995 goto out;
73996 }
73997 }
73998@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
73999 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
74000 if (ret)
74001 goto out;
74002+
74003+ map_flags = vma->vm_flags;
74004 ret = move_vma(vma, addr, old_len, new_len, new_addr);
74005+ if (!(ret & ~PAGE_MASK)) {
74006+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
74007+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
74008+ }
74009 }
74010 out:
74011 if (ret & ~PAGE_MASK)
74012diff -urNp linux-2.6.32.48/mm/nommu.c linux-2.6.32.48/mm/nommu.c
74013--- linux-2.6.32.48/mm/nommu.c 2011-11-08 19:02:43.000000000 -0500
74014+++ linux-2.6.32.48/mm/nommu.c 2011-11-15 19:59:43.000000000 -0500
74015@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
74016 int sysctl_overcommit_ratio = 50; /* default is 50% */
74017 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
74018 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
74019-int heap_stack_gap = 0;
74020
74021 atomic_long_t mmap_pages_allocated;
74022
74023@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
74024 EXPORT_SYMBOL(find_vma);
74025
74026 /*
74027- * find a VMA
74028- * - we don't extend stack VMAs under NOMMU conditions
74029- */
74030-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
74031-{
74032- return find_vma(mm, addr);
74033-}
74034-
74035-/*
74036 * expand a stack to a given address
74037 * - not supported under NOMMU conditions
74038 */
74039diff -urNp linux-2.6.32.48/mm/page_alloc.c linux-2.6.32.48/mm/page_alloc.c
74040--- linux-2.6.32.48/mm/page_alloc.c 2011-11-08 19:02:43.000000000 -0500
74041+++ linux-2.6.32.48/mm/page_alloc.c 2011-11-15 19:59:43.000000000 -0500
74042@@ -289,7 +289,7 @@ out:
74043 * This usage means that zero-order pages may not be compound.
74044 */
74045
74046-static void free_compound_page(struct page *page)
74047+void free_compound_page(struct page *page)
74048 {
74049 __free_pages_ok(page, compound_order(page));
74050 }
74051@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
74052 int bad = 0;
74053 int wasMlocked = __TestClearPageMlocked(page);
74054
74055+#ifdef CONFIG_PAX_MEMORY_SANITIZE
74056+ unsigned long index = 1UL << order;
74057+#endif
74058+
74059 kmemcheck_free_shadow(page, order);
74060
74061 for (i = 0 ; i < (1 << order) ; ++i)
74062@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
74063 debug_check_no_obj_freed(page_address(page),
74064 PAGE_SIZE << order);
74065 }
74066+
74067+#ifdef CONFIG_PAX_MEMORY_SANITIZE
74068+ for (; index; --index)
74069+ sanitize_highpage(page + index - 1);
74070+#endif
74071+
74072 arch_free_page(page, order);
74073 kernel_map_pages(page, 1 << order, 0);
74074
74075@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
74076 arch_alloc_page(page, order);
74077 kernel_map_pages(page, 1 << order, 1);
74078
74079+#ifndef CONFIG_PAX_MEMORY_SANITIZE
74080 if (gfp_flags & __GFP_ZERO)
74081 prep_zero_page(page, order, gfp_flags);
74082+#endif
74083
74084 if (order && (gfp_flags & __GFP_COMP))
74085 prep_compound_page(page, order);
74086@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
74087 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
74088 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
74089 }
74090+
74091+#ifdef CONFIG_PAX_MEMORY_SANITIZE
74092+ sanitize_highpage(page);
74093+#endif
74094+
74095 arch_free_page(page, 0);
74096 kernel_map_pages(page, 1, 0);
74097
74098@@ -2179,6 +2196,8 @@ void show_free_areas(void)
74099 int cpu;
74100 struct zone *zone;
74101
74102+ pax_track_stack();
74103+
74104 for_each_populated_zone(zone) {
74105 show_node(zone);
74106 printk("%s per-cpu:\n", zone->name);
74107@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
74108 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
74109 }
74110 #else
74111-static void inline setup_usemap(struct pglist_data *pgdat,
74112+static inline void setup_usemap(struct pglist_data *pgdat,
74113 struct zone *zone, unsigned long zonesize) {}
74114 #endif /* CONFIG_SPARSEMEM */
74115
74116diff -urNp linux-2.6.32.48/mm/percpu.c linux-2.6.32.48/mm/percpu.c
74117--- linux-2.6.32.48/mm/percpu.c 2011-11-08 19:02:43.000000000 -0500
74118+++ linux-2.6.32.48/mm/percpu.c 2011-11-15 19:59:43.000000000 -0500
74119@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
74120 static unsigned int pcpu_last_unit_cpu __read_mostly;
74121
74122 /* the address of the first chunk which starts with the kernel static area */
74123-void *pcpu_base_addr __read_mostly;
74124+void *pcpu_base_addr __read_only;
74125 EXPORT_SYMBOL_GPL(pcpu_base_addr);
74126
74127 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
74128diff -urNp linux-2.6.32.48/mm/rmap.c linux-2.6.32.48/mm/rmap.c
74129--- linux-2.6.32.48/mm/rmap.c 2011-11-08 19:02:43.000000000 -0500
74130+++ linux-2.6.32.48/mm/rmap.c 2011-11-15 19:59:43.000000000 -0500
74131@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
74132 /* page_table_lock to protect against threads */
74133 spin_lock(&mm->page_table_lock);
74134 if (likely(!vma->anon_vma)) {
74135+
74136+#ifdef CONFIG_PAX_SEGMEXEC
74137+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74138+
74139+ if (vma_m) {
74140+ BUG_ON(vma_m->anon_vma);
74141+ vma_m->anon_vma = anon_vma;
74142+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
74143+ }
74144+#endif
74145+
74146 vma->anon_vma = anon_vma;
74147 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
74148 allocated = NULL;
74149diff -urNp linux-2.6.32.48/mm/shmem.c linux-2.6.32.48/mm/shmem.c
74150--- linux-2.6.32.48/mm/shmem.c 2011-11-08 19:02:43.000000000 -0500
74151+++ linux-2.6.32.48/mm/shmem.c 2011-11-15 19:59:43.000000000 -0500
74152@@ -31,7 +31,7 @@
74153 #include <linux/swap.h>
74154 #include <linux/ima.h>
74155
74156-static struct vfsmount *shm_mnt;
74157+struct vfsmount *shm_mnt;
74158
74159 #ifdef CONFIG_SHMEM
74160 /*
74161@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
74162 goto unlock;
74163 }
74164 entry = shmem_swp_entry(info, index, NULL);
74165+ if (!entry)
74166+ goto unlock;
74167 if (entry->val) {
74168 /*
74169 * The more uptodate page coming down from a stacked
74170@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
74171 struct vm_area_struct pvma;
74172 struct page *page;
74173
74174+ pax_track_stack();
74175+
74176 spol = mpol_cond_copy(&mpol,
74177 mpol_shared_policy_lookup(&info->policy, idx));
74178
74179@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
74180
74181 info = SHMEM_I(inode);
74182 inode->i_size = len-1;
74183- if (len <= (char *)inode - (char *)info) {
74184+ if (len <= (char *)inode - (char *)info && len <= 64) {
74185 /* do it inline */
74186 memcpy(info, symname, len);
74187 inode->i_op = &shmem_symlink_inline_operations;
74188@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
74189 int err = -ENOMEM;
74190
74191 /* Round up to L1_CACHE_BYTES to resist false sharing */
74192- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74193- L1_CACHE_BYTES), GFP_KERNEL);
74194+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74195 if (!sbinfo)
74196 return -ENOMEM;
74197
74198diff -urNp linux-2.6.32.48/mm/slab.c linux-2.6.32.48/mm/slab.c
74199--- linux-2.6.32.48/mm/slab.c 2011-11-08 19:02:43.000000000 -0500
74200+++ linux-2.6.32.48/mm/slab.c 2011-11-15 19:59:43.000000000 -0500
74201@@ -174,7 +174,7 @@
74202
74203 /* Legal flag mask for kmem_cache_create(). */
74204 #if DEBUG
74205-# define CREATE_MASK (SLAB_RED_ZONE | \
74206+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74207 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74208 SLAB_CACHE_DMA | \
74209 SLAB_STORE_USER | \
74210@@ -182,7 +182,7 @@
74211 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74212 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74213 #else
74214-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74215+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74216 SLAB_CACHE_DMA | \
74217 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74218 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74219@@ -308,7 +308,7 @@ struct kmem_list3 {
74220 * Need this for bootstrapping a per node allocator.
74221 */
74222 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
74223-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74224+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
74225 #define CACHE_CACHE 0
74226 #define SIZE_AC MAX_NUMNODES
74227 #define SIZE_L3 (2 * MAX_NUMNODES)
74228@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
74229 if ((x)->max_freeable < i) \
74230 (x)->max_freeable = i; \
74231 } while (0)
74232-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74233-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74234-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74235-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74236+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74237+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74238+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74239+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74240 #else
74241 #define STATS_INC_ACTIVE(x) do { } while (0)
74242 #define STATS_DEC_ACTIVE(x) do { } while (0)
74243@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
74244 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74245 */
74246 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74247- const struct slab *slab, void *obj)
74248+ const struct slab *slab, const void *obj)
74249 {
74250 u32 offset = (obj - slab->s_mem);
74251 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
74252@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
74253 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74254 sizes[INDEX_AC].cs_size,
74255 ARCH_KMALLOC_MINALIGN,
74256- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74257+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74258 NULL);
74259
74260 if (INDEX_AC != INDEX_L3) {
74261@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
74262 kmem_cache_create(names[INDEX_L3].name,
74263 sizes[INDEX_L3].cs_size,
74264 ARCH_KMALLOC_MINALIGN,
74265- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74266+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74267 NULL);
74268 }
74269
74270@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
74271 sizes->cs_cachep = kmem_cache_create(names->name,
74272 sizes->cs_size,
74273 ARCH_KMALLOC_MINALIGN,
74274- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74275+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74276 NULL);
74277 }
74278 #ifdef CONFIG_ZONE_DMA
74279@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
74280 }
74281 /* cpu stats */
74282 {
74283- unsigned long allochit = atomic_read(&cachep->allochit);
74284- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74285- unsigned long freehit = atomic_read(&cachep->freehit);
74286- unsigned long freemiss = atomic_read(&cachep->freemiss);
74287+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74288+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74289+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74290+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74291
74292 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74293 allochit, allocmiss, freehit, freemiss);
74294@@ -4471,15 +4471,66 @@ static const struct file_operations proc
74295
74296 static int __init slab_proc_init(void)
74297 {
74298- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
74299+ mode_t gr_mode = S_IRUGO;
74300+
74301+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74302+ gr_mode = S_IRUSR;
74303+#endif
74304+
74305+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
74306 #ifdef CONFIG_DEBUG_SLAB_LEAK
74307- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74308+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
74309 #endif
74310 return 0;
74311 }
74312 module_init(slab_proc_init);
74313 #endif
74314
74315+void check_object_size(const void *ptr, unsigned long n, bool to)
74316+{
74317+
74318+#ifdef CONFIG_PAX_USERCOPY
74319+ struct page *page;
74320+ struct kmem_cache *cachep = NULL;
74321+ struct slab *slabp;
74322+ unsigned int objnr;
74323+ unsigned long offset;
74324+
74325+ if (!n)
74326+ return;
74327+
74328+ if (ZERO_OR_NULL_PTR(ptr))
74329+ goto report;
74330+
74331+ if (!virt_addr_valid(ptr))
74332+ return;
74333+
74334+ page = virt_to_head_page(ptr);
74335+
74336+ if (!PageSlab(page)) {
74337+ if (object_is_on_stack(ptr, n) == -1)
74338+ goto report;
74339+ return;
74340+ }
74341+
74342+ cachep = page_get_cache(page);
74343+ if (!(cachep->flags & SLAB_USERCOPY))
74344+ goto report;
74345+
74346+ slabp = page_get_slab(page);
74347+ objnr = obj_to_index(cachep, slabp, ptr);
74348+ BUG_ON(objnr >= cachep->num);
74349+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74350+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74351+ return;
74352+
74353+report:
74354+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
74355+#endif
74356+
74357+}
74358+EXPORT_SYMBOL(check_object_size);
74359+
74360 /**
74361 * ksize - get the actual amount of memory allocated for a given object
74362 * @objp: Pointer to the object
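The check_object_size() routine added to mm/slab.c above validates a usercopy against slab metadata: it finds the object containing the pointer, computes the offset into that object, and reports a violation unless the requested length fits in the remaining object space. Below is a minimal userspace model of that offset arithmetic under simplified assumptions; fake_cache, fake_slab and usercopy_ok() are illustrative stand-ins for kmem_cache, slab and the kernel check, and debug padding (obj_offset) is ignored.

/* Userspace model of the PAX_USERCOPY bounds check added above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_cache { size_t buffer_size; size_t obj_size; };
struct fake_slab  { const char *s_mem; };     /* start of the objects */

/* objnr = (ptr - s_mem) / buffer_size, as obj_to_index() does */
static size_t obj_index(const struct fake_cache *c, const struct fake_slab *s,
                        const void *p)
{
    return (size_t)((const char *)p - s->s_mem) / c->buffer_size;
}

/* true if [ptr, ptr + n) stays inside the object that contains ptr */
static bool usercopy_ok(const struct fake_cache *c, const struct fake_slab *s,
                        const void *p, size_t n)
{
    size_t idx      = obj_index(c, s, p);
    const char *obj = s->s_mem + idx * c->buffer_size;
    size_t offset   = (size_t)((const char *)p - obj);

    return offset <= c->obj_size && n <= c->obj_size - offset;
}

int main(void)
{
    char pool[4 * 64];
    struct fake_cache c = { .buffer_size = 64, .obj_size = 56 };
    struct fake_slab  s = { .s_mem = pool };

    printf("%d\n", usercopy_ok(&c, &s, pool + 64 + 8, 40));  /* 1: fits          */
    printf("%d\n", usercopy_ok(&c, &s, pool + 64 + 8, 60));  /* 0: would overrun */
    return 0;
}
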
74363diff -urNp linux-2.6.32.48/mm/slob.c linux-2.6.32.48/mm/slob.c
74364--- linux-2.6.32.48/mm/slob.c 2011-11-08 19:02:43.000000000 -0500
74365+++ linux-2.6.32.48/mm/slob.c 2011-11-15 19:59:43.000000000 -0500
74366@@ -29,7 +29,7 @@
74367 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74368 * alloc_pages() directly, allocating compound pages so the page order
74369 * does not have to be separately tracked, and also stores the exact
74370- * allocation size in page->private so that it can be used to accurately
74371+ * allocation size in slob_page->size so that it can be used to accurately
74372 * provide ksize(). These objects are detected in kfree() because slob_page()
74373 * is false for them.
74374 *
74375@@ -58,6 +58,7 @@
74376 */
74377
74378 #include <linux/kernel.h>
74379+#include <linux/sched.h>
74380 #include <linux/slab.h>
74381 #include <linux/mm.h>
74382 #include <linux/swap.h> /* struct reclaim_state */
74383@@ -100,7 +101,8 @@ struct slob_page {
74384 unsigned long flags; /* mandatory */
74385 atomic_t _count; /* mandatory */
74386 slobidx_t units; /* free units left in page */
74387- unsigned long pad[2];
74388+ unsigned long pad[1];
74389+ unsigned long size; /* size when >=PAGE_SIZE */
74390 slob_t *free; /* first free slob_t in page */
74391 struct list_head list; /* linked list of free pages */
74392 };
74393@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
74394 */
74395 static inline int is_slob_page(struct slob_page *sp)
74396 {
74397- return PageSlab((struct page *)sp);
74398+ return PageSlab((struct page *)sp) && !sp->size;
74399 }
74400
74401 static inline void set_slob_page(struct slob_page *sp)
74402@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
74403
74404 static inline struct slob_page *slob_page(const void *addr)
74405 {
74406- return (struct slob_page *)virt_to_page(addr);
74407+ return (struct slob_page *)virt_to_head_page(addr);
74408 }
74409
74410 /*
74411@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
74412 /*
74413 * Return the size of a slob block.
74414 */
74415-static slobidx_t slob_units(slob_t *s)
74416+static slobidx_t slob_units(const slob_t *s)
74417 {
74418 if (s->units > 0)
74419 return s->units;
74420@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
74421 /*
74422 * Return the next free slob block pointer after this one.
74423 */
74424-static slob_t *slob_next(slob_t *s)
74425+static slob_t *slob_next(const slob_t *s)
74426 {
74427 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74428 slobidx_t next;
74429@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
74430 /*
74431 * Returns true if s is the last free block in its page.
74432 */
74433-static int slob_last(slob_t *s)
74434+static int slob_last(const slob_t *s)
74435 {
74436 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74437 }
74438@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
74439 if (!page)
74440 return NULL;
74441
74442+ set_slob_page(page);
74443 return page_address(page);
74444 }
74445
74446@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
74447 if (!b)
74448 return NULL;
74449 sp = slob_page(b);
74450- set_slob_page(sp);
74451
74452 spin_lock_irqsave(&slob_lock, flags);
74453 sp->units = SLOB_UNITS(PAGE_SIZE);
74454 sp->free = b;
74455+ sp->size = 0;
74456 INIT_LIST_HEAD(&sp->list);
74457 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74458 set_slob_page_free(sp, slob_list);
74459@@ -475,10 +478,9 @@ out:
74460 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
74461 #endif
74462
74463-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74464+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74465 {
74466- unsigned int *m;
74467- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74468+ slob_t *m;
74469 void *ret;
74470
74471 lockdep_trace_alloc(gfp);
74472@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
74473
74474 if (!m)
74475 return NULL;
74476- *m = size;
74477+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74478+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74479+ m[0].units = size;
74480+ m[1].units = align;
74481 ret = (void *)m + align;
74482
74483 trace_kmalloc_node(_RET_IP_, ret,
74484@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
74485
74486 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
74487 if (ret) {
74488- struct page *page;
74489- page = virt_to_page(ret);
74490- page->private = size;
74491+ struct slob_page *sp;
74492+ sp = slob_page(ret);
74493+ sp->size = size;
74494 }
74495
74496 trace_kmalloc_node(_RET_IP_, ret,
74497 size, PAGE_SIZE << order, gfp, node);
74498 }
74499
74500- kmemleak_alloc(ret, size, 1, gfp);
74501+ return ret;
74502+}
74503+
74504+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74505+{
74506+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74507+ void *ret = __kmalloc_node_align(size, gfp, node, align);
74508+
74509+ if (!ZERO_OR_NULL_PTR(ret))
74510+ kmemleak_alloc(ret, size, 1, gfp);
74511 return ret;
74512 }
74513 EXPORT_SYMBOL(__kmalloc_node);
74514@@ -528,13 +542,88 @@ void kfree(const void *block)
74515 sp = slob_page(block);
74516 if (is_slob_page(sp)) {
74517 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74518- unsigned int *m = (unsigned int *)(block - align);
74519- slob_free(m, *m + align);
74520- } else
74521+ slob_t *m = (slob_t *)(block - align);
74522+ slob_free(m, m[0].units + align);
74523+ } else {
74524+ clear_slob_page(sp);
74525+ free_slob_page(sp);
74526+ sp->size = 0;
74527 put_page(&sp->page);
74528+ }
74529 }
74530 EXPORT_SYMBOL(kfree);
74531
74532+void check_object_size(const void *ptr, unsigned long n, bool to)
74533+{
74534+
74535+#ifdef CONFIG_PAX_USERCOPY
74536+ struct slob_page *sp;
74537+ const slob_t *free;
74538+ const void *base;
74539+ unsigned long flags;
74540+
74541+ if (!n)
74542+ return;
74543+
74544+ if (ZERO_OR_NULL_PTR(ptr))
74545+ goto report;
74546+
74547+ if (!virt_addr_valid(ptr))
74548+ return;
74549+
74550+ sp = slob_page(ptr);
74551+ if (!PageSlab((struct page*)sp)) {
74552+ if (object_is_on_stack(ptr, n) == -1)
74553+ goto report;
74554+ return;
74555+ }
74556+
74557+ if (sp->size) {
74558+ base = page_address(&sp->page);
74559+ if (base <= ptr && n <= sp->size - (ptr - base))
74560+ return;
74561+ goto report;
74562+ }
74563+
74564+ /* some tricky double walking to find the chunk */
74565+ spin_lock_irqsave(&slob_lock, flags);
74566+ base = (void *)((unsigned long)ptr & PAGE_MASK);
74567+ free = sp->free;
74568+
74569+ while (!slob_last(free) && (void *)free <= ptr) {
74570+ base = free + slob_units(free);
74571+ free = slob_next(free);
74572+ }
74573+
74574+ while (base < (void *)free) {
74575+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74576+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
74577+ int offset;
74578+
74579+ if (ptr < base + align)
74580+ break;
74581+
74582+ offset = ptr - base - align;
74583+ if (offset >= m) {
74584+ base += size;
74585+ continue;
74586+ }
74587+
74588+ if (n > m - offset)
74589+ break;
74590+
74591+ spin_unlock_irqrestore(&slob_lock, flags);
74592+ return;
74593+ }
74594+
74595+ spin_unlock_irqrestore(&slob_lock, flags);
74596+report:
74597+ pax_report_usercopy(ptr, n, to, NULL);
74598+#endif
74599+
74600+}
74601+EXPORT_SYMBOL(check_object_size);
74602+
74603 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74604 size_t ksize(const void *block)
74605 {
74606@@ -547,10 +636,10 @@ size_t ksize(const void *block)
74607 sp = slob_page(block);
74608 if (is_slob_page(sp)) {
74609 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74610- unsigned int *m = (unsigned int *)(block - align);
74611- return SLOB_UNITS(*m) * SLOB_UNIT;
74612+ slob_t *m = (slob_t *)(block - align);
74613+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74614 } else
74615- return sp->page.private;
74616+ return sp->size;
74617 }
74618 EXPORT_SYMBOL(ksize);
74619
74620@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
74621 {
74622 struct kmem_cache *c;
74623
74624+#ifdef CONFIG_PAX_USERCOPY
74625+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
74626+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74627+#else
74628 c = slob_alloc(sizeof(struct kmem_cache),
74629 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74630+#endif
74631
74632 if (c) {
74633 c->name = name;
74634@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
74635 {
74636 void *b;
74637
74638+#ifdef CONFIG_PAX_USERCOPY
74639+ b = __kmalloc_node_align(c->size, flags, node, c->align);
74640+#else
74641 if (c->size < PAGE_SIZE) {
74642 b = slob_alloc(c->size, flags, c->align, node);
74643 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74644 SLOB_UNITS(c->size) * SLOB_UNIT,
74645 flags, node);
74646 } else {
74647+ struct slob_page *sp;
74648+
74649 b = slob_new_pages(flags, get_order(c->size), node);
74650+ sp = slob_page(b);
74651+ sp->size = c->size;
74652 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74653 PAGE_SIZE << get_order(c->size),
74654 flags, node);
74655 }
74656+#endif
74657
74658 if (c->ctor)
74659 c->ctor(b);
74660@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
74661
74662 static void __kmem_cache_free(void *b, int size)
74663 {
74664- if (size < PAGE_SIZE)
74665+ struct slob_page *sp = slob_page(b);
74666+
74667+ if (is_slob_page(sp))
74668 slob_free(b, size);
74669- else
74670+ else {
74671+ clear_slob_page(sp);
74672+ free_slob_page(sp);
74673+ sp->size = 0;
74674 slob_free_pages(b, get_order(size));
74675+ }
74676 }
74677
74678 static void kmem_rcu_free(struct rcu_head *head)
74679@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
74680
74681 void kmem_cache_free(struct kmem_cache *c, void *b)
74682 {
74683+ int size = c->size;
74684+
74685+#ifdef CONFIG_PAX_USERCOPY
74686+ if (size + c->align < PAGE_SIZE) {
74687+ size += c->align;
74688+ b -= c->align;
74689+ }
74690+#endif
74691+
74692 kmemleak_free_recursive(b, c->flags);
74693 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74694 struct slob_rcu *slob_rcu;
74695- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
74696+ slob_rcu = b + (size - sizeof(struct slob_rcu));
74697 INIT_RCU_HEAD(&slob_rcu->head);
74698- slob_rcu->size = c->size;
74699+ slob_rcu->size = size;
74700 call_rcu(&slob_rcu->head, kmem_rcu_free);
74701 } else {
74702- __kmem_cache_free(b, c->size);
74703+ __kmem_cache_free(b, size);
74704 }
74705
74706+#ifdef CONFIG_PAX_USERCOPY
74707+ trace_kfree(_RET_IP_, b);
74708+#else
74709 trace_kmem_cache_free(_RET_IP_, b);
74710+#endif
74711+
74712 }
74713 EXPORT_SYMBOL(kmem_cache_free);
74714
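The mm/slob.c changes above replace the single unsigned int size prefix with two slob_t units in front of each kmalloc'd block, recording both the requested size (m[0].units) and the alignment (m[1].units) so kfree() and ksize() can recover them; large allocations record their size in the new slob_page->size field instead of page->private. The sketch below models only the general idea of metadata stored in the alignment gap before the returned pointer; it assumes the alignment is at least as large as the header, mirroring the BUILD_BUG_ON checks in the hunk, and the names are not the kernel's.

/* Model of a size/align header kept in front of the returned pointer. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdr { uint32_t size; uint32_t align; };   /* role of m[0]/m[1].units */

static void *kmalloc_model(size_t size, size_t align)
{
    /* header lives in the 'align' bytes in front of the returned pointer;
     * assumes align >= sizeof(struct hdr) */
    char *raw = malloc(align + size);
    if (!raw)
        return NULL;
    ((struct hdr *)raw)->size  = (uint32_t)size;
    ((struct hdr *)raw)->align = (uint32_t)align;
    return raw + align;
}

static size_t ksize_model(const void *p, size_t align)
{
    const struct hdr *h = (const struct hdr *)((const char *)p - align);
    return h->size;
}

static void kfree_model(void *p, size_t align)
{
    free((char *)p - align);
}

int main(void)
{
    void *p = kmalloc_model(100, 16);
    printf("ksize = %zu\n", ksize_model(p, 16));   /* prints 100 */
    kfree_model(p, 16);
    return 0;
}
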
74715diff -urNp linux-2.6.32.48/mm/slub.c linux-2.6.32.48/mm/slub.c
74716--- linux-2.6.32.48/mm/slub.c 2011-11-08 19:02:43.000000000 -0500
74717+++ linux-2.6.32.48/mm/slub.c 2011-11-15 19:59:43.000000000 -0500
74718@@ -201,7 +201,7 @@ struct track {
74719
74720 enum track_item { TRACK_ALLOC, TRACK_FREE };
74721
74722-#ifdef CONFIG_SLUB_DEBUG
74723+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74724 static int sysfs_slab_add(struct kmem_cache *);
74725 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74726 static void sysfs_slab_remove(struct kmem_cache *);
74727@@ -410,7 +410,7 @@ static void print_track(const char *s, s
74728 if (!t->addr)
74729 return;
74730
74731- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74732+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74733 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
74734 }
74735
74736@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
74737
74738 page = virt_to_head_page(x);
74739
74740+ BUG_ON(!PageSlab(page));
74741+
74742 slab_free(s, page, x, _RET_IP_);
74743
74744 trace_kmem_cache_free(_RET_IP_, x);
74745@@ -1937,7 +1939,7 @@ static int slub_min_objects;
74746 * Merge control. If this is set then no merging of slab caches will occur.
74747 * (Could be removed. This was introduced to pacify the merge skeptics.)
74748 */
74749-static int slub_nomerge;
74750+static int slub_nomerge = 1;
74751
74752 /*
74753 * Calculate the order of allocation given an slab object size.
74754@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
74755 * list to avoid pounding the page allocator excessively.
74756 */
74757 set_min_partial(s, ilog2(s->size));
74758- s->refcount = 1;
74759+ atomic_set(&s->refcount, 1);
74760 #ifdef CONFIG_NUMA
74761 s->remote_node_defrag_ratio = 1000;
74762 #endif
74763@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
74764 void kmem_cache_destroy(struct kmem_cache *s)
74765 {
74766 down_write(&slub_lock);
74767- s->refcount--;
74768- if (!s->refcount) {
74769+ if (atomic_dec_and_test(&s->refcount)) {
74770 list_del(&s->list);
74771 up_write(&slub_lock);
74772 if (kmem_cache_close(s)) {
74773@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
74774 __setup("slub_nomerge", setup_slub_nomerge);
74775
74776 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
74777- const char *name, int size, gfp_t gfp_flags)
74778+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
74779 {
74780- unsigned int flags = 0;
74781-
74782 if (gfp_flags & SLUB_DMA)
74783- flags = SLAB_CACHE_DMA;
74784+ flags |= SLAB_CACHE_DMA;
74785
74786 /*
74787 * This function is called with IRQs disabled during early-boot on
74788@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
74789 EXPORT_SYMBOL(__kmalloc_node);
74790 #endif
74791
74792+void check_object_size(const void *ptr, unsigned long n, bool to)
74793+{
74794+
74795+#ifdef CONFIG_PAX_USERCOPY
74796+ struct page *page;
74797+ struct kmem_cache *s = NULL;
74798+ unsigned long offset;
74799+
74800+ if (!n)
74801+ return;
74802+
74803+ if (ZERO_OR_NULL_PTR(ptr))
74804+ goto report;
74805+
74806+ if (!virt_addr_valid(ptr))
74807+ return;
74808+
74809+ page = get_object_page(ptr);
74810+
74811+ if (!page) {
74812+ if (object_is_on_stack(ptr, n) == -1)
74813+ goto report;
74814+ return;
74815+ }
74816+
74817+ s = page->slab;
74818+ if (!(s->flags & SLAB_USERCOPY))
74819+ goto report;
74820+
74821+ offset = (ptr - page_address(page)) % s->size;
74822+ if (offset <= s->objsize && n <= s->objsize - offset)
74823+ return;
74824+
74825+report:
74826+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
74827+#endif
74828+
74829+}
74830+EXPORT_SYMBOL(check_object_size);
74831+
74832 size_t ksize(const void *object)
74833 {
74834 struct page *page;
74835@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
74836 * kmem_cache_open for slab_state == DOWN.
74837 */
74838 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
74839- sizeof(struct kmem_cache_node), GFP_NOWAIT);
74840- kmalloc_caches[0].refcount = -1;
74841+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
74842+ atomic_set(&kmalloc_caches[0].refcount, -1);
74843 caches++;
74844
74845 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
74846@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
74847 /* Caches that are not of the two-to-the-power-of size */
74848 if (KMALLOC_MIN_SIZE <= 32) {
74849 create_kmalloc_cache(&kmalloc_caches[1],
74850- "kmalloc-96", 96, GFP_NOWAIT);
74851+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
74852 caches++;
74853 }
74854 if (KMALLOC_MIN_SIZE <= 64) {
74855 create_kmalloc_cache(&kmalloc_caches[2],
74856- "kmalloc-192", 192, GFP_NOWAIT);
74857+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
74858 caches++;
74859 }
74860
74861 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74862 create_kmalloc_cache(&kmalloc_caches[i],
74863- "kmalloc", 1 << i, GFP_NOWAIT);
74864+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
74865 caches++;
74866 }
74867
74868@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
74869 /*
74870 * We may have set a slab to be unmergeable during bootstrap.
74871 */
74872- if (s->refcount < 0)
74873+ if (atomic_read(&s->refcount) < 0)
74874 return 1;
74875
74876 return 0;
74877@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
74878 if (s) {
74879 int cpu;
74880
74881- s->refcount++;
74882+ atomic_inc(&s->refcount);
74883 /*
74884 * Adjust the object sizes so that we clear
74885 * the complete object on kzalloc.
74886@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
74887
74888 if (sysfs_slab_alias(s, name)) {
74889 down_write(&slub_lock);
74890- s->refcount--;
74891+ atomic_dec(&s->refcount);
74892 up_write(&slub_lock);
74893 goto err;
74894 }
74895@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
74896
74897 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74898 {
74899- return sprintf(buf, "%d\n", s->refcount - 1);
74900+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74901 }
74902 SLAB_ATTR_RO(aliases);
74903
74904@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
74905 kfree(s);
74906 }
74907
74908-static struct sysfs_ops slab_sysfs_ops = {
74909+static const struct sysfs_ops slab_sysfs_ops = {
74910 .show = slab_attr_show,
74911 .store = slab_attr_store,
74912 };
74913@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
74914 return 0;
74915 }
74916
74917-static struct kset_uevent_ops slab_uevent_ops = {
74918+static const struct kset_uevent_ops slab_uevent_ops = {
74919 .filter = uevent_filter,
74920 };
74921
74922@@ -4564,6 +4603,7 @@ static char *create_unique_id(struct kme
74923 return name;
74924 }
74925
74926+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74927 static int sysfs_slab_add(struct kmem_cache *s)
74928 {
74929 int err;
74930@@ -4619,6 +4659,7 @@ static void sysfs_slab_remove(struct kme
74931 kobject_del(&s->kobj);
74932 kobject_put(&s->kobj);
74933 }
74934+#endif
74935
74936 /*
74937 * Need to buffer aliases during bootup until sysfs becomes
74938@@ -4632,6 +4673,7 @@ struct saved_alias {
74939
74940 static struct saved_alias *alias_list;
74941
74942+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74943 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74944 {
74945 struct saved_alias *al;
74946@@ -4654,6 +4696,7 @@ static int sysfs_slab_alias(struct kmem_
74947 alias_list = al;
74948 return 0;
74949 }
74950+#endif
74951
74952 static int __init slab_sysfs_init(void)
74953 {
74954@@ -4785,7 +4828,13 @@ static const struct file_operations proc
74955
74956 static int __init slab_proc_init(void)
74957 {
74958- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
74959+ mode_t gr_mode = S_IRUGO;
74960+
74961+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74962+ gr_mode = S_IRUSR;
74963+#endif
74964+
74965+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
74966 return 0;
74967 }
74968 module_init(slab_proc_init);
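A recurring change in the mm/slub.c hunks above is converting kmem_cache->refcount from a plain integer to atomic_t, so that "s->refcount--; if (!s->refcount)" becomes atomic_dec_and_test() and the increments become atomic_inc(). The C11 sketch below shows the pattern being adopted; struct cache, cache_get() and cache_put() are illustrative names only.

/* C11 model of the refcount conversion: an atomic decrement-and-test
 * guarantees exactly one caller observes the drop to zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache { atomic_int refcount; };

static void cache_get(struct cache *c)
{
    atomic_fetch_add(&c->refcount, 1);              /* ~ atomic_inc() */
}

/* returns true exactly once, for the caller dropping the last reference */
static bool cache_put(struct cache *c)
{
    return atomic_fetch_sub(&c->refcount, 1) == 1;  /* ~ atomic_dec_and_test() */
}

int main(void)
{
    struct cache c;
    atomic_init(&c.refcount, 1);
    cache_get(&c);
    printf("%d\n", cache_put(&c));   /* 0: a reference remains     */
    printf("%d\n", cache_put(&c));   /* 1: last reference dropped  */
    return 0;
}
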
74969diff -urNp linux-2.6.32.48/mm/swap.c linux-2.6.32.48/mm/swap.c
74970--- linux-2.6.32.48/mm/swap.c 2011-11-08 19:02:43.000000000 -0500
74971+++ linux-2.6.32.48/mm/swap.c 2011-11-15 19:59:43.000000000 -0500
74972@@ -30,6 +30,7 @@
74973 #include <linux/notifier.h>
74974 #include <linux/backing-dev.h>
74975 #include <linux/memcontrol.h>
74976+#include <linux/hugetlb.h>
74977
74978 #include "internal.h"
74979
74980@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
74981 compound_page_dtor *dtor;
74982
74983 dtor = get_compound_page_dtor(page);
74984+ if (!PageHuge(page))
74985+ BUG_ON(dtor != free_compound_page);
74986 (*dtor)(page);
74987 }
74988 }
74989diff -urNp linux-2.6.32.48/mm/util.c linux-2.6.32.48/mm/util.c
74990--- linux-2.6.32.48/mm/util.c 2011-11-08 19:02:43.000000000 -0500
74991+++ linux-2.6.32.48/mm/util.c 2011-11-15 19:59:43.000000000 -0500
74992@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
74993 void arch_pick_mmap_layout(struct mm_struct *mm)
74994 {
74995 mm->mmap_base = TASK_UNMAPPED_BASE;
74996+
74997+#ifdef CONFIG_PAX_RANDMMAP
74998+ if (mm->pax_flags & MF_PAX_RANDMMAP)
74999+ mm->mmap_base += mm->delta_mmap;
75000+#endif
75001+
75002 mm->get_unmapped_area = arch_get_unmapped_area;
75003 mm->unmap_area = arch_unmap_area;
75004 }
75005diff -urNp linux-2.6.32.48/mm/vmalloc.c linux-2.6.32.48/mm/vmalloc.c
75006--- linux-2.6.32.48/mm/vmalloc.c 2011-11-08 19:02:43.000000000 -0500
75007+++ linux-2.6.32.48/mm/vmalloc.c 2011-11-15 19:59:43.000000000 -0500
75008@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
75009
75010 pte = pte_offset_kernel(pmd, addr);
75011 do {
75012- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75013- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75014+
75015+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75016+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75017+ BUG_ON(!pte_exec(*pte));
75018+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75019+ continue;
75020+ }
75021+#endif
75022+
75023+ {
75024+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75025+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75026+ }
75027 } while (pte++, addr += PAGE_SIZE, addr != end);
75028 }
75029
75030@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
75031 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75032 {
75033 pte_t *pte;
75034+ int ret = -ENOMEM;
75035
75036 /*
75037 * nr is a running index into the array which helps higher level
75038@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
75039 pte = pte_alloc_kernel(pmd, addr);
75040 if (!pte)
75041 return -ENOMEM;
75042+
75043+ pax_open_kernel();
75044 do {
75045 struct page *page = pages[*nr];
75046
75047- if (WARN_ON(!pte_none(*pte)))
75048- return -EBUSY;
75049- if (WARN_ON(!page))
75050- return -ENOMEM;
75051+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75052+ if (!(pgprot_val(prot) & _PAGE_NX))
75053+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
75054+ else
75055+#endif
75056+
75057+ if (WARN_ON(!pte_none(*pte))) {
75058+ ret = -EBUSY;
75059+ goto out;
75060+ }
75061+ if (WARN_ON(!page)) {
75062+ ret = -ENOMEM;
75063+ goto out;
75064+ }
75065 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75066 (*nr)++;
75067 } while (pte++, addr += PAGE_SIZE, addr != end);
75068- return 0;
75069+ ret = 0;
75070+out:
75071+ pax_close_kernel();
75072+ return ret;
75073 }
75074
75075 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75076@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
75077 * and fall back on vmalloc() if that fails. Others
75078 * just put it in the vmalloc space.
75079 */
75080-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75081+#ifdef CONFIG_MODULES
75082+#ifdef MODULES_VADDR
75083 unsigned long addr = (unsigned long)x;
75084 if (addr >= MODULES_VADDR && addr < MODULES_END)
75085 return 1;
75086 #endif
75087+
75088+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75089+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75090+ return 1;
75091+#endif
75092+
75093+#endif
75094+
75095 return is_vmalloc_addr(x);
75096 }
75097
75098@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
75099
75100 if (!pgd_none(*pgd)) {
75101 pud_t *pud = pud_offset(pgd, addr);
75102+#ifdef CONFIG_X86
75103+ if (!pud_large(*pud))
75104+#endif
75105 if (!pud_none(*pud)) {
75106 pmd_t *pmd = pmd_offset(pud, addr);
75107+#ifdef CONFIG_X86
75108+ if (!pmd_large(*pmd))
75109+#endif
75110 if (!pmd_none(*pmd)) {
75111 pte_t *ptep, pte;
75112
75113@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
75114 struct rb_node *tmp;
75115
75116 while (*p) {
75117- struct vmap_area *tmp;
75118+ struct vmap_area *varea;
75119
75120 parent = *p;
75121- tmp = rb_entry(parent, struct vmap_area, rb_node);
75122- if (va->va_start < tmp->va_end)
75123+ varea = rb_entry(parent, struct vmap_area, rb_node);
75124+ if (va->va_start < varea->va_end)
75125 p = &(*p)->rb_left;
75126- else if (va->va_end > tmp->va_start)
75127+ else if (va->va_end > varea->va_start)
75128 p = &(*p)->rb_right;
75129 else
75130 BUG();
75131@@ -1233,6 +1275,16 @@ static struct vm_struct *__get_vm_area_n
75132 struct vm_struct *area;
75133
75134 BUG_ON(in_interrupt());
75135+
75136+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75137+ if (flags & VM_KERNEXEC) {
75138+ if (start != VMALLOC_START || end != VMALLOC_END)
75139+ return NULL;
75140+ start = (unsigned long)MODULES_EXEC_VADDR;
75141+ end = (unsigned long)MODULES_EXEC_END;
75142+ }
75143+#endif
75144+
75145 if (flags & VM_IOREMAP) {
75146 int bit = fls(size);
75147
75148@@ -1458,6 +1510,11 @@ void *vmap(struct page **pages, unsigned
75149 if (count > totalram_pages)
75150 return NULL;
75151
75152+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75153+ if (!(pgprot_val(prot) & _PAGE_NX))
75154+ flags |= VM_KERNEXEC;
75155+#endif
75156+
75157 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75158 __builtin_return_address(0));
75159 if (!area)
75160@@ -1568,6 +1625,13 @@ static void *__vmalloc_node(unsigned lon
75161 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
75162 return NULL;
75163
75164+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75165+ if (!(pgprot_val(prot) & _PAGE_NX))
75166+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
75167+ node, gfp_mask, caller);
75168+ else
75169+#endif
75170+
75171 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
75172 VMALLOC_END, node, gfp_mask, caller);
75173
75174@@ -1586,6 +1650,7 @@ static void *__vmalloc_node(unsigned lon
75175 return addr;
75176 }
75177
75178+#undef __vmalloc
75179 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
75180 {
75181 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
75182@@ -1602,6 +1667,7 @@ EXPORT_SYMBOL(__vmalloc);
75183 * For tight control over page level allocator and protection flags
75184 * use __vmalloc() instead.
75185 */
75186+#undef vmalloc
75187 void *vmalloc(unsigned long size)
75188 {
75189 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
75190@@ -1616,6 +1682,7 @@ EXPORT_SYMBOL(vmalloc);
75191 * The resulting memory area is zeroed so it can be mapped to userspace
75192 * without leaking data.
75193 */
75194+#undef vmalloc_user
75195 void *vmalloc_user(unsigned long size)
75196 {
75197 struct vm_struct *area;
75198@@ -1643,6 +1710,7 @@ EXPORT_SYMBOL(vmalloc_user);
75199 * For tight control over page level allocator and protection flags
75200 * use __vmalloc() instead.
75201 */
75202+#undef vmalloc_node
75203 void *vmalloc_node(unsigned long size, int node)
75204 {
75205 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
75206@@ -1665,10 +1733,10 @@ EXPORT_SYMBOL(vmalloc_node);
75207 * For tight control over page level allocator and protection flags
75208 * use __vmalloc() instead.
75209 */
75210-
75211+#undef vmalloc_exec
75212 void *vmalloc_exec(unsigned long size)
75213 {
75214- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75215+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
75216 -1, __builtin_return_address(0));
75217 }
75218
75219@@ -1687,6 +1755,7 @@ void *vmalloc_exec(unsigned long size)
75220 * Allocate enough 32bit PA addressable pages to cover @size from the
75221 * page level allocator and map them into contiguous kernel virtual space.
75222 */
75223+#undef vmalloc_32
75224 void *vmalloc_32(unsigned long size)
75225 {
75226 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
75227@@ -1701,6 +1770,7 @@ EXPORT_SYMBOL(vmalloc_32);
75228 * The resulting memory area is 32bit addressable and zeroed so it can be
75229 * mapped to userspace without leaking data.
75230 */
75231+#undef vmalloc_32_user
75232 void *vmalloc_32_user(unsigned long size)
75233 {
75234 struct vm_struct *area;
75235@@ -1965,6 +2035,8 @@ int remap_vmalloc_range(struct vm_area_s
75236 unsigned long uaddr = vma->vm_start;
75237 unsigned long usize = vma->vm_end - vma->vm_start;
75238
75239+ BUG_ON(vma->vm_mirror);
75240+
75241 if ((PAGE_SIZE-1) & (unsigned long)addr)
75242 return -EINVAL;
75243
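The vmap_pte_range() hunk above brackets the PTE-population loop with pax_open_kernel()/pax_close_kernel(), which forces the early "return -EBUSY" / "return -ENOMEM" paths to be rewritten as "goto out" so the closing call always runs. The sketch below shows the same single-exit restructuring with placeholder enter()/leave() calls standing in for the PaX helpers; the error codes and loop body are simplified stand-ins.

/* Single-exit restructuring so a paired enter()/leave() always balances. */
#include <stdio.h>

static int depth;
static void enter(void) { depth++; }
static void leave(void) { depth--; }

static int fill(int *slots, int n)
{
    int ret = -1;                    /* ~ -ENOMEM default in the hunk */
    int i;

    enter();
    for (i = 0; i < n; i++) {
        if (slots[i] != 0) {         /* ~ WARN_ON(!pte_none(*pte)) */
            ret = -2;                /* ~ -EBUSY */
            goto out;                /* was: return -EBUSY; */
        }
        slots[i] = 1;
    }
    ret = 0;
out:
    leave();                         /* always balances enter() */
    return ret;
}

int main(void)
{
    int a[4] = { 0, 0, 1, 0 };
    printf("ret=%d depth=%d\n", fill(a, 4), depth);   /* ret=-2 depth=0 */
    return 0;
}
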
75244diff -urNp linux-2.6.32.48/mm/vmstat.c linux-2.6.32.48/mm/vmstat.c
75245--- linux-2.6.32.48/mm/vmstat.c 2011-11-08 19:02:43.000000000 -0500
75246+++ linux-2.6.32.48/mm/vmstat.c 2011-11-15 19:59:43.000000000 -0500
75247@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
75248 *
75249 * vm_stat contains the global counters
75250 */
75251-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75252+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75253 EXPORT_SYMBOL(vm_stat);
75254
75255 #ifdef CONFIG_SMP
75256@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
75257 v = p->vm_stat_diff[i];
75258 p->vm_stat_diff[i] = 0;
75259 local_irq_restore(flags);
75260- atomic_long_add(v, &zone->vm_stat[i]);
75261+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75262 global_diff[i] += v;
75263 #ifdef CONFIG_NUMA
75264 /* 3 seconds idle till flush */
75265@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
75266
75267 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75268 if (global_diff[i])
75269- atomic_long_add(global_diff[i], &vm_stat[i]);
75270+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75271 }
75272
75273 #endif
75274@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
75275 start_cpu_timer(cpu);
75276 #endif
75277 #ifdef CONFIG_PROC_FS
75278- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75279- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75280- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75281- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75282+ {
75283+ mode_t gr_mode = S_IRUGO;
75284+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75285+ gr_mode = S_IRUSR;
75286+#endif
75287+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75288+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75289+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75290+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75291+#else
75292+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75293+#endif
75294+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75295+ }
75296 #endif
75297 return 0;
75298 }
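Several *_proc_init() hunks in this patch, including the mm/vmstat.c one above, gate the /proc entry mode on CONFIG_GRKERNSEC_PROC_ADD: world-readable S_IRUGO (0444) by default, owner-only S_IRUSR (0400) when the option is enabled. The tiny sketch below just shows the two resulting mode values; CONFIG_GRKERNSEC_PROC_ADD is the real kconfig symbol, everything else is illustrative userspace code.

/* Mode gating as used by the gr_mode variables in the proc init hunks. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;   /* S_IRUGO, i.e. 0444 */
#ifdef CONFIG_GRKERNSEC_PROC_ADD
    mode = S_IRUSR;                              /* 0400: owner (root) only */
#endif
    printf("proc mode: %04o\n", (unsigned)mode);
    return 0;
}
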
75299diff -urNp linux-2.6.32.48/net/8021q/vlan.c linux-2.6.32.48/net/8021q/vlan.c
75300--- linux-2.6.32.48/net/8021q/vlan.c 2011-11-08 19:02:43.000000000 -0500
75301+++ linux-2.6.32.48/net/8021q/vlan.c 2011-11-15 19:59:43.000000000 -0500
75302@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
75303 err = -EPERM;
75304 if (!capable(CAP_NET_ADMIN))
75305 break;
75306- if ((args.u.name_type >= 0) &&
75307- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75308+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75309 struct vlan_net *vn;
75310
75311 vn = net_generic(net, vlan_net_id);
75312diff -urNp linux-2.6.32.48/net/9p/trans_fd.c linux-2.6.32.48/net/9p/trans_fd.c
75313--- linux-2.6.32.48/net/9p/trans_fd.c 2011-11-08 19:02:43.000000000 -0500
75314+++ linux-2.6.32.48/net/9p/trans_fd.c 2011-11-15 19:59:43.000000000 -0500
75315@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client
75316 oldfs = get_fs();
75317 set_fs(get_ds());
75318 /* The cast to a user pointer is valid due to the set_fs() */
75319- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75320+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75321 set_fs(oldfs);
75322
75323 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75324diff -urNp linux-2.6.32.48/net/atm/atm_misc.c linux-2.6.32.48/net/atm/atm_misc.c
75325--- linux-2.6.32.48/net/atm/atm_misc.c 2011-11-08 19:02:43.000000000 -0500
75326+++ linux-2.6.32.48/net/atm/atm_misc.c 2011-11-15 19:59:43.000000000 -0500
75327@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
75328 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75329 return 1;
75330 atm_return(vcc,truesize);
75331- atomic_inc(&vcc->stats->rx_drop);
75332+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75333 return 0;
75334 }
75335
75336@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
75337 }
75338 }
75339 atm_return(vcc,guess);
75340- atomic_inc(&vcc->stats->rx_drop);
75341+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75342 return NULL;
75343 }
75344
75345@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
75346
75347 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
75348 {
75349-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75350+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75351 __SONET_ITEMS
75352 #undef __HANDLE_ITEM
75353 }
75354@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
75355
75356 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
75357 {
75358-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
75359+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75360 __SONET_ITEMS
75361 #undef __HANDLE_ITEM
75362 }
75363diff -urNp linux-2.6.32.48/net/atm/lec.h linux-2.6.32.48/net/atm/lec.h
75364--- linux-2.6.32.48/net/atm/lec.h 2011-11-08 19:02:43.000000000 -0500
75365+++ linux-2.6.32.48/net/atm/lec.h 2011-11-15 19:59:43.000000000 -0500
75366@@ -48,7 +48,7 @@ struct lane2_ops {
75367 const u8 *tlvs, u32 sizeoftlvs);
75368 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75369 const u8 *tlvs, u32 sizeoftlvs);
75370-};
75371+} __no_const;
75372
75373 /*
75374 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
75375diff -urNp linux-2.6.32.48/net/atm/mpc.h linux-2.6.32.48/net/atm/mpc.h
75376--- linux-2.6.32.48/net/atm/mpc.h 2011-11-08 19:02:43.000000000 -0500
75377+++ linux-2.6.32.48/net/atm/mpc.h 2011-11-15 19:59:43.000000000 -0500
75378@@ -33,7 +33,7 @@ struct mpoa_client {
75379 struct mpc_parameters parameters; /* parameters for this client */
75380
75381 const struct net_device_ops *old_ops;
75382- struct net_device_ops new_ops;
75383+ net_device_ops_no_const new_ops;
75384 };
75385
75386
75387diff -urNp linux-2.6.32.48/net/atm/mpoa_caches.c linux-2.6.32.48/net/atm/mpoa_caches.c
75388--- linux-2.6.32.48/net/atm/mpoa_caches.c 2011-11-08 19:02:43.000000000 -0500
75389+++ linux-2.6.32.48/net/atm/mpoa_caches.c 2011-11-15 19:59:43.000000000 -0500
75390@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
75391 struct timeval now;
75392 struct k_message msg;
75393
75394+ pax_track_stack();
75395+
75396 do_gettimeofday(&now);
75397
75398 write_lock_irq(&client->egress_lock);
75399diff -urNp linux-2.6.32.48/net/atm/proc.c linux-2.6.32.48/net/atm/proc.c
75400--- linux-2.6.32.48/net/atm/proc.c 2011-11-08 19:02:43.000000000 -0500
75401+++ linux-2.6.32.48/net/atm/proc.c 2011-11-15 19:59:43.000000000 -0500
75402@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
75403 const struct k_atm_aal_stats *stats)
75404 {
75405 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
75406- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
75407- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
75408- atomic_read(&stats->rx_drop));
75409+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75410+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75411+ atomic_read_unchecked(&stats->rx_drop));
75412 }
75413
75414 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75415@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
75416 {
75417 struct sock *sk = sk_atm(vcc);
75418
75419+#ifdef CONFIG_GRKERNSEC_HIDESYM
75420+ seq_printf(seq, "%p ", NULL);
75421+#else
75422 seq_printf(seq, "%p ", vcc);
75423+#endif
75424+
75425 if (!vcc->dev)
75426 seq_printf(seq, "Unassigned ");
75427 else
75428@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
75429 {
75430 if (!vcc->dev)
75431 seq_printf(seq, sizeof(void *) == 4 ?
75432+#ifdef CONFIG_GRKERNSEC_HIDESYM
75433+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
75434+#else
75435 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
75436+#endif
75437 else
75438 seq_printf(seq, "%3d %3d %5d ",
75439 vcc->dev->number, vcc->vpi, vcc->vci);
75440diff -urNp linux-2.6.32.48/net/atm/resources.c linux-2.6.32.48/net/atm/resources.c
75441--- linux-2.6.32.48/net/atm/resources.c 2011-11-08 19:02:43.000000000 -0500
75442+++ linux-2.6.32.48/net/atm/resources.c 2011-11-15 19:59:43.000000000 -0500
75443@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
75444 static void copy_aal_stats(struct k_atm_aal_stats *from,
75445 struct atm_aal_stats *to)
75446 {
75447-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75448+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75449 __AAL_STAT_ITEMS
75450 #undef __HANDLE_ITEM
75451 }
75452@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
75453 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75454 struct atm_aal_stats *to)
75455 {
75456-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75457+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75458 __AAL_STAT_ITEMS
75459 #undef __HANDLE_ITEM
75460 }
75461diff -urNp linux-2.6.32.48/net/bridge/br_private.h linux-2.6.32.48/net/bridge/br_private.h
75462--- linux-2.6.32.48/net/bridge/br_private.h 2011-11-08 19:02:43.000000000 -0500
75463+++ linux-2.6.32.48/net/bridge/br_private.h 2011-11-15 19:59:43.000000000 -0500
75464@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
75465
75466 #ifdef CONFIG_SYSFS
75467 /* br_sysfs_if.c */
75468-extern struct sysfs_ops brport_sysfs_ops;
75469+extern const struct sysfs_ops brport_sysfs_ops;
75470 extern int br_sysfs_addif(struct net_bridge_port *p);
75471
75472 /* br_sysfs_br.c */
75473diff -urNp linux-2.6.32.48/net/bridge/br_stp_if.c linux-2.6.32.48/net/bridge/br_stp_if.c
75474--- linux-2.6.32.48/net/bridge/br_stp_if.c 2011-11-08 19:02:43.000000000 -0500
75475+++ linux-2.6.32.48/net/bridge/br_stp_if.c 2011-11-15 19:59:43.000000000 -0500
75476@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
75477 char *envp[] = { NULL };
75478
75479 if (br->stp_enabled == BR_USER_STP) {
75480- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
75481+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
75482 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
75483 br->dev->name, r);
75484
75485diff -urNp linux-2.6.32.48/net/bridge/br_sysfs_if.c linux-2.6.32.48/net/bridge/br_sysfs_if.c
75486--- linux-2.6.32.48/net/bridge/br_sysfs_if.c 2011-11-08 19:02:43.000000000 -0500
75487+++ linux-2.6.32.48/net/bridge/br_sysfs_if.c 2011-11-15 19:59:43.000000000 -0500
75488@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
75489 return ret;
75490 }
75491
75492-struct sysfs_ops brport_sysfs_ops = {
75493+const struct sysfs_ops brport_sysfs_ops = {
75494 .show = brport_show,
75495 .store = brport_store,
75496 };
75497diff -urNp linux-2.6.32.48/net/bridge/netfilter/ebtables.c linux-2.6.32.48/net/bridge/netfilter/ebtables.c
75498--- linux-2.6.32.48/net/bridge/netfilter/ebtables.c 2011-11-08 19:02:43.000000000 -0500
75499+++ linux-2.6.32.48/net/bridge/netfilter/ebtables.c 2011-11-15 19:59:43.000000000 -0500
75500@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
75501 unsigned int entries_size, nentries;
75502 char *entries;
75503
75504+ pax_track_stack();
75505+
75506 if (cmd == EBT_SO_GET_ENTRIES) {
75507 entries_size = t->private->entries_size;
75508 nentries = t->private->nentries;
75509diff -urNp linux-2.6.32.48/net/can/bcm.c linux-2.6.32.48/net/can/bcm.c
75510--- linux-2.6.32.48/net/can/bcm.c 2011-11-08 19:02:43.000000000 -0500
75511+++ linux-2.6.32.48/net/can/bcm.c 2011-11-15 19:59:43.000000000 -0500
75512@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
75513 struct bcm_sock *bo = bcm_sk(sk);
75514 struct bcm_op *op;
75515
75516+#ifdef CONFIG_GRKERNSEC_HIDESYM
75517+ seq_printf(m, ">>> socket %p", NULL);
75518+ seq_printf(m, " / sk %p", NULL);
75519+ seq_printf(m, " / bo %p", NULL);
75520+#else
75521 seq_printf(m, ">>> socket %p", sk->sk_socket);
75522 seq_printf(m, " / sk %p", sk);
75523 seq_printf(m, " / bo %p", bo);
75524+#endif
75525 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
75526 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
75527 seq_printf(m, " <<<\n");
75528diff -urNp linux-2.6.32.48/net/compat.c linux-2.6.32.48/net/compat.c
75529--- linux-2.6.32.48/net/compat.c 2011-11-08 19:02:43.000000000 -0500
75530+++ linux-2.6.32.48/net/compat.c 2011-11-15 19:59:43.000000000 -0500
75531@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kms
75532 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75533 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75534 return -EFAULT;
75535- kmsg->msg_name = compat_ptr(tmp1);
75536- kmsg->msg_iov = compat_ptr(tmp2);
75537- kmsg->msg_control = compat_ptr(tmp3);
75538+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75539+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75540+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75541 return 0;
75542 }
75543
75544@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *k
75545 kern_msg->msg_name = NULL;
75546
75547 tot_len = iov_from_user_compat_to_kern(kern_iov,
75548- (struct compat_iovec __user *)kern_msg->msg_iov,
75549+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
75550 kern_msg->msg_iovlen);
75551 if (tot_len >= 0)
75552 kern_msg->msg_iov = kern_iov;
75553@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *k
75554
75555 #define CMSG_COMPAT_FIRSTHDR(msg) \
75556 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75557- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75558+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75559 (struct compat_cmsghdr __user *)NULL)
75560
75561 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75562 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75563 (ucmlen) <= (unsigned long) \
75564 ((mhdr)->msg_controllen - \
75565- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75566+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75567
75568 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75569 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75570 {
75571 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75572- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75573+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75574 msg->msg_controllen)
75575 return NULL;
75576 return (struct compat_cmsghdr __user *)ptr;
75577@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
75578 {
75579 struct compat_timeval ctv;
75580 struct compat_timespec cts[3];
75581- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75582+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75583 struct compat_cmsghdr cmhdr;
75584 int cmlen;
75585
75586@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
75587
75588 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75589 {
75590- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75591+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75592 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75593 int fdnum = scm->fp->count;
75594 struct file **fp = scm->fp->fp;
75595@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct so
75596 len = sizeof(ktime);
75597 old_fs = get_fs();
75598 set_fs(KERNEL_DS);
75599- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75600+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75601 set_fs(old_fs);
75602
75603 if (!err) {
75604@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *so
75605 case MCAST_JOIN_GROUP:
75606 case MCAST_LEAVE_GROUP:
75607 {
75608- struct compat_group_req __user *gr32 = (void *)optval;
75609+ struct compat_group_req __user *gr32 = (void __user *)optval;
75610 struct group_req __user *kgr =
75611 compat_alloc_user_space(sizeof(struct group_req));
75612 u32 interface;
75613@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *so
75614 case MCAST_BLOCK_SOURCE:
75615 case MCAST_UNBLOCK_SOURCE:
75616 {
75617- struct compat_group_source_req __user *gsr32 = (void *)optval;
75618+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75619 struct group_source_req __user *kgsr = compat_alloc_user_space(
75620 sizeof(struct group_source_req));
75621 u32 interface;
75622@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *so
75623 }
75624 case MCAST_MSFILTER:
75625 {
75626- struct compat_group_filter __user *gf32 = (void *)optval;
75627+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75628 struct group_filter __user *kgf;
75629 u32 interface, fmode, numsrc;
75630
75631diff -urNp linux-2.6.32.48/net/core/dev.c linux-2.6.32.48/net/core/dev.c
75632--- linux-2.6.32.48/net/core/dev.c 2011-11-08 19:02:43.000000000 -0500
75633+++ linux-2.6.32.48/net/core/dev.c 2011-11-15 19:59:43.000000000 -0500
75634@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
75635 if (no_module && capable(CAP_NET_ADMIN))
75636 no_module = request_module("netdev-%s", name);
75637 if (no_module && capable(CAP_SYS_MODULE)) {
75638+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75639+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
75640+#else
75641 if (!request_module("%s", name))
75642 pr_err("Loading kernel module for a network device "
75643 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75644 "instead\n", name);
75645+#endif
75646 }
75647 }
75648 EXPORT_SYMBOL(dev_load);
75649@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
75650
75651 struct dev_gso_cb {
75652 void (*destructor)(struct sk_buff *skb);
75653-};
75654+} __no_const;
75655
75656 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75657
75658@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
75659 }
75660 EXPORT_SYMBOL(netif_rx_ni);
75661
75662-static void net_tx_action(struct softirq_action *h)
75663+static void net_tx_action(void)
75664 {
75665 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75666
75667@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
75668 EXPORT_SYMBOL(netif_napi_del);
75669
75670
75671-static void net_rx_action(struct softirq_action *h)
75672+static void net_rx_action(void)
75673 {
75674 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
75675 unsigned long time_limit = jiffies + 2;
75676diff -urNp linux-2.6.32.48/net/core/flow.c linux-2.6.32.48/net/core/flow.c
75677--- linux-2.6.32.48/net/core/flow.c 2011-11-08 19:02:43.000000000 -0500
75678+++ linux-2.6.32.48/net/core/flow.c 2011-11-15 19:59:43.000000000 -0500
75679@@ -35,11 +35,11 @@ struct flow_cache_entry {
75680 atomic_t *object_ref;
75681 };
75682
75683-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75684+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75685
75686 static u32 flow_hash_shift;
75687 #define flow_hash_size (1 << flow_hash_shift)
75688-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
75689+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
75690
75691 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
75692
75693@@ -52,7 +52,7 @@ struct flow_percpu_info {
75694 u32 hash_rnd;
75695 int count;
75696 };
75697-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
75698+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
75699
75700 #define flow_hash_rnd_recalc(cpu) \
75701 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
75702@@ -69,7 +69,7 @@ struct flow_flush_info {
75703 atomic_t cpuleft;
75704 struct completion completion;
75705 };
75706-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
75707+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
75708
75709 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
75710
75711@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
75712 if (fle->family == family &&
75713 fle->dir == dir &&
75714 flow_key_compare(key, &fle->key) == 0) {
75715- if (fle->genid == atomic_read(&flow_cache_genid)) {
75716+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
75717 void *ret = fle->object;
75718
75719 if (ret)
75720@@ -228,7 +228,7 @@ nocache:
75721 err = resolver(net, key, family, dir, &obj, &obj_ref);
75722
75723 if (fle && !err) {
75724- fle->genid = atomic_read(&flow_cache_genid);
75725+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
75726
75727 if (fle->object)
75728 atomic_dec(fle->object_ref);
75729@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
75730
75731 fle = flow_table(cpu)[i];
75732 for (; fle; fle = fle->next) {
75733- unsigned genid = atomic_read(&flow_cache_genid);
75734+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
75735
75736 if (!fle->object || fle->genid == genid)
75737 continue;
75738diff -urNp linux-2.6.32.48/net/core/rtnetlink.c linux-2.6.32.48/net/core/rtnetlink.c
75739--- linux-2.6.32.48/net/core/rtnetlink.c 2011-11-08 19:02:43.000000000 -0500
75740+++ linux-2.6.32.48/net/core/rtnetlink.c 2011-11-15 19:59:43.000000000 -0500
75741@@ -57,7 +57,7 @@ struct rtnl_link
75742 {
75743 rtnl_doit_func doit;
75744 rtnl_dumpit_func dumpit;
75745-};
75746+} __no_const;
75747
75748 static DEFINE_MUTEX(rtnl_mutex);
75749
75750diff -urNp linux-2.6.32.48/net/core/scm.c linux-2.6.32.48/net/core/scm.c
75751--- linux-2.6.32.48/net/core/scm.c 2011-11-08 19:02:43.000000000 -0500
75752+++ linux-2.6.32.48/net/core/scm.c 2011-11-15 19:59:43.000000000 -0500
75753@@ -191,7 +191,7 @@ error:
75754 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75755 {
75756 struct cmsghdr __user *cm
75757- = (__force struct cmsghdr __user *)msg->msg_control;
75758+ = (struct cmsghdr __force_user *)msg->msg_control;
75759 struct cmsghdr cmhdr;
75760 int cmlen = CMSG_LEN(len);
75761 int err;
75762@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int le
75763 err = -EFAULT;
75764 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75765 goto out;
75766- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75767+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75768 goto out;
75769 cmlen = CMSG_SPACE(len);
75770 if (msg->msg_controllen < cmlen)
75771@@ -229,7 +229,7 @@ out:
75772 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75773 {
75774 struct cmsghdr __user *cm
75775- = (__force struct cmsghdr __user*)msg->msg_control;
75776+ = (struct cmsghdr __force_user *)msg->msg_control;
75777
75778 int fdmax = 0;
75779 int fdnum = scm->fp->count;
75780@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg,
75781 if (fdnum < fdmax)
75782 fdmax = fdnum;
75783
75784- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75785+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75786 i++, cmfptr++)
75787 {
75788 int new_fd;
75789diff -urNp linux-2.6.32.48/net/core/secure_seq.c linux-2.6.32.48/net/core/secure_seq.c
75790--- linux-2.6.32.48/net/core/secure_seq.c 2011-11-08 19:02:43.000000000 -0500
75791+++ linux-2.6.32.48/net/core/secure_seq.c 2011-11-15 19:59:43.000000000 -0500
75792@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
75793 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
75794
75795 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
75796- __be16 dport)
75797+ __be16 dport)
75798 {
75799 u32 secret[MD5_MESSAGE_BYTES / 4];
75800 u32 hash[MD5_DIGEST_WORDS];
75801@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
75802 secret[i] = net_secret[i];
75803
75804 md5_transform(hash, secret);
75805-
75806 return hash[0];
75807 }
75808 #endif
75809diff -urNp linux-2.6.32.48/net/core/skbuff.c linux-2.6.32.48/net/core/skbuff.c
75810--- linux-2.6.32.48/net/core/skbuff.c 2011-11-08 19:02:43.000000000 -0500
75811+++ linux-2.6.32.48/net/core/skbuff.c 2011-11-15 19:59:43.000000000 -0500
75812@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
75813 struct sk_buff *frag_iter;
75814 struct sock *sk = skb->sk;
75815
75816+ pax_track_stack();
75817+
75818 /*
75819 * __skb_splice_bits() only fails if the output has no room left,
75820 * so no point in going over the frag_list for the error case.
75821diff -urNp linux-2.6.32.48/net/core/sock.c linux-2.6.32.48/net/core/sock.c
75822--- linux-2.6.32.48/net/core/sock.c 2011-11-08 19:02:43.000000000 -0500
75823+++ linux-2.6.32.48/net/core/sock.c 2011-11-15 19:59:43.000000000 -0500
75824@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
75825 break;
75826
75827 case SO_PEERCRED:
75828+ {
75829+ struct ucred peercred;
75830 if (len > sizeof(sk->sk_peercred))
75831 len = sizeof(sk->sk_peercred);
75832- if (copy_to_user(optval, &sk->sk_peercred, len))
75833+ peercred = sk->sk_peercred;
75834+ if (copy_to_user(optval, &peercred, len))
75835 return -EFAULT;
75836 goto lenout;
75837+ }
75838
75839 case SO_PEERNAME:
75840 {
75841@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
75842 */
75843 smp_wmb();
75844 atomic_set(&sk->sk_refcnt, 1);
75845- atomic_set(&sk->sk_drops, 0);
75846+ atomic_set_unchecked(&sk->sk_drops, 0);
75847 }
75848 EXPORT_SYMBOL(sock_init_data);
75849
75850diff -urNp linux-2.6.32.48/net/decnet/sysctl_net_decnet.c linux-2.6.32.48/net/decnet/sysctl_net_decnet.c
75851--- linux-2.6.32.48/net/decnet/sysctl_net_decnet.c 2011-11-08 19:02:43.000000000 -0500
75852+++ linux-2.6.32.48/net/decnet/sysctl_net_decnet.c 2011-11-15 19:59:43.000000000 -0500
75853@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
75854
75855 if (len > *lenp) len = *lenp;
75856
75857- if (copy_to_user(buffer, addr, len))
75858+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
75859 return -EFAULT;
75860
75861 *lenp = len;
75862@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
75863
75864 if (len > *lenp) len = *lenp;
75865
75866- if (copy_to_user(buffer, devname, len))
75867+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
75868 return -EFAULT;
75869
75870 *lenp = len;
75871diff -urNp linux-2.6.32.48/net/econet/Kconfig linux-2.6.32.48/net/econet/Kconfig
75872--- linux-2.6.32.48/net/econet/Kconfig 2011-11-08 19:02:43.000000000 -0500
75873+++ linux-2.6.32.48/net/econet/Kconfig 2011-11-15 19:59:43.000000000 -0500
75874@@ -4,7 +4,7 @@
75875
75876 config ECONET
75877 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75878- depends on EXPERIMENTAL && INET
75879+ depends on EXPERIMENTAL && INET && BROKEN
75880 ---help---
75881 Econet is a fairly old and slow networking protocol mainly used by
75882 Acorn computers to access file and print servers. It uses native
75883diff -urNp linux-2.6.32.48/net/ieee802154/dgram.c linux-2.6.32.48/net/ieee802154/dgram.c
75884--- linux-2.6.32.48/net/ieee802154/dgram.c 2011-11-08 19:02:43.000000000 -0500
75885+++ linux-2.6.32.48/net/ieee802154/dgram.c 2011-11-15 19:59:43.000000000 -0500
75886@@ -318,7 +318,7 @@ out:
75887 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
75888 {
75889 if (sock_queue_rcv_skb(sk, skb) < 0) {
75890- atomic_inc(&sk->sk_drops);
75891+ atomic_inc_unchecked(&sk->sk_drops);
75892 kfree_skb(skb);
75893 return NET_RX_DROP;
75894 }
75895diff -urNp linux-2.6.32.48/net/ieee802154/raw.c linux-2.6.32.48/net/ieee802154/raw.c
75896--- linux-2.6.32.48/net/ieee802154/raw.c 2011-11-08 19:02:43.000000000 -0500
75897+++ linux-2.6.32.48/net/ieee802154/raw.c 2011-11-15 19:59:43.000000000 -0500
75898@@ -206,7 +206,7 @@ out:
75899 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
75900 {
75901 if (sock_queue_rcv_skb(sk, skb) < 0) {
75902- atomic_inc(&sk->sk_drops);
75903+ atomic_inc_unchecked(&sk->sk_drops);
75904 kfree_skb(skb);
75905 return NET_RX_DROP;
75906 }
75907diff -urNp linux-2.6.32.48/net/ipv4/inet_diag.c linux-2.6.32.48/net/ipv4/inet_diag.c
75908--- linux-2.6.32.48/net/ipv4/inet_diag.c 2011-11-08 19:02:43.000000000 -0500
75909+++ linux-2.6.32.48/net/ipv4/inet_diag.c 2011-11-15 19:59:43.000000000 -0500
75910@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
75911 r->idiag_retrans = 0;
75912
75913 r->id.idiag_if = sk->sk_bound_dev_if;
75914+#ifdef CONFIG_GRKERNSEC_HIDESYM
75915+ r->id.idiag_cookie[0] = 0;
75916+ r->id.idiag_cookie[1] = 0;
75917+#else
75918 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
75919 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75920+#endif
75921
75922 r->id.idiag_sport = inet->sport;
75923 r->id.idiag_dport = inet->dport;
75924@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
75925 r->idiag_family = tw->tw_family;
75926 r->idiag_retrans = 0;
75927 r->id.idiag_if = tw->tw_bound_dev_if;
75928+
75929+#ifdef CONFIG_GRKERNSEC_HIDESYM
75930+ r->id.idiag_cookie[0] = 0;
75931+ r->id.idiag_cookie[1] = 0;
75932+#else
75933 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
75934 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
75935+#endif
75936+
75937 r->id.idiag_sport = tw->tw_sport;
75938 r->id.idiag_dport = tw->tw_dport;
75939 r->id.idiag_src[0] = tw->tw_rcv_saddr;
75940@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
75941 if (sk == NULL)
75942 goto unlock;
75943
75944+#ifndef CONFIG_GRKERNSEC_HIDESYM
75945 err = -ESTALE;
75946 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
75947 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
75948 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
75949 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
75950 goto out;
75951+#endif
75952
75953 err = -ENOMEM;
75954 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
75955@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
75956 r->idiag_retrans = req->retrans;
75957
75958 r->id.idiag_if = sk->sk_bound_dev_if;
75959+
75960+#ifdef CONFIG_GRKERNSEC_HIDESYM
75961+ r->id.idiag_cookie[0] = 0;
75962+ r->id.idiag_cookie[1] = 0;
75963+#else
75964 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
75965 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
75966+#endif
75967
75968 tmo = req->expires - jiffies;
75969 if (tmo < 0)
75970diff -urNp linux-2.6.32.48/net/ipv4/inet_hashtables.c linux-2.6.32.48/net/ipv4/inet_hashtables.c
75971--- linux-2.6.32.48/net/ipv4/inet_hashtables.c 2011-11-08 19:02:43.000000000 -0500
75972+++ linux-2.6.32.48/net/ipv4/inet_hashtables.c 2011-11-15 19:59:43.000000000 -0500
75973@@ -18,12 +18,15 @@
75974 #include <linux/sched.h>
75975 #include <linux/slab.h>
75976 #include <linux/wait.h>
75977+#include <linux/security.h>
75978
75979 #include <net/inet_connection_sock.h>
75980 #include <net/inet_hashtables.h>
75981 #include <net/secure_seq.h>
75982 #include <net/ip.h>
75983
75984+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75985+
75986 /*
75987 * Allocate and initialize a new local port bind bucket.
75988 * The bindhash mutex for snum's hash chain must be held here.
75989@@ -491,6 +494,8 @@ ok:
75990 }
75991 spin_unlock(&head->lock);
75992
75993+ gr_update_task_in_ip_table(current, inet_sk(sk));
75994+
75995 if (tw) {
75996 inet_twsk_deschedule(tw, death_row);
75997 inet_twsk_put(tw);
75998diff -urNp linux-2.6.32.48/net/ipv4/inetpeer.c linux-2.6.32.48/net/ipv4/inetpeer.c
75999--- linux-2.6.32.48/net/ipv4/inetpeer.c 2011-11-08 19:02:43.000000000 -0500
76000+++ linux-2.6.32.48/net/ipv4/inetpeer.c 2011-11-15 19:59:43.000000000 -0500
76001@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
76002 struct inet_peer *p, *n;
76003 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
76004
76005+ pax_track_stack();
76006+
76007 /* Look up for the address quickly. */
76008 read_lock_bh(&peer_pool_lock);
76009 p = lookup(daddr, NULL);
76010@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
76011 return NULL;
76012 n->v4daddr = daddr;
76013 atomic_set(&n->refcnt, 1);
76014- atomic_set(&n->rid, 0);
76015+ atomic_set_unchecked(&n->rid, 0);
76016 n->ip_id_count = secure_ip_id(daddr);
76017 n->tcp_ts_stamp = 0;
76018
76019diff -urNp linux-2.6.32.48/net/ipv4/ipconfig.c linux-2.6.32.48/net/ipv4/ipconfig.c
76020--- linux-2.6.32.48/net/ipv4/ipconfig.c 2011-11-08 19:02:43.000000000 -0500
76021+++ linux-2.6.32.48/net/ipv4/ipconfig.c 2011-11-15 19:59:43.000000000 -0500
76022@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsig
76023
76024 mm_segment_t oldfs = get_fs();
76025 set_fs(get_ds());
76026- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76027+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76028 set_fs(oldfs);
76029 return res;
76030 }
76031@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned
76032
76033 mm_segment_t oldfs = get_fs();
76034 set_fs(get_ds());
76035- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76036+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76037 set_fs(oldfs);
76038 return res;
76039 }
76040@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigne
76041
76042 mm_segment_t oldfs = get_fs();
76043 set_fs(get_ds());
76044- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76045+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76046 set_fs(oldfs);
76047 return res;
76048 }
76049diff -urNp linux-2.6.32.48/net/ipv4/ip_fragment.c linux-2.6.32.48/net/ipv4/ip_fragment.c
76050--- linux-2.6.32.48/net/ipv4/ip_fragment.c 2011-11-08 19:02:43.000000000 -0500
76051+++ linux-2.6.32.48/net/ipv4/ip_fragment.c 2011-11-15 19:59:43.000000000 -0500
76052@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
76053 return 0;
76054
76055 start = qp->rid;
76056- end = atomic_inc_return(&peer->rid);
76057+ end = atomic_inc_return_unchecked(&peer->rid);
76058 qp->rid = end;
76059
76060 rc = qp->q.fragments && (end - start) > max;
76061diff -urNp linux-2.6.32.48/net/ipv4/ip_sockglue.c linux-2.6.32.48/net/ipv4/ip_sockglue.c
76062--- linux-2.6.32.48/net/ipv4/ip_sockglue.c 2011-11-08 19:02:43.000000000 -0500
76063+++ linux-2.6.32.48/net/ipv4/ip_sockglue.c 2011-11-15 19:59:43.000000000 -0500
76064@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
76065 int val;
76066 int len;
76067
76068+ pax_track_stack();
76069+
76070 if (level != SOL_IP)
76071 return -EOPNOTSUPP;
76072
76073@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock
76074 if (sk->sk_type != SOCK_STREAM)
76075 return -ENOPROTOOPT;
76076
76077- msg.msg_control = optval;
76078+ msg.msg_control = (void __force_kernel *)optval;
76079 msg.msg_controllen = len;
76080 msg.msg_flags = 0;
76081
76082diff -urNp linux-2.6.32.48/net/ipv4/netfilter/arp_tables.c linux-2.6.32.48/net/ipv4/netfilter/arp_tables.c
76083--- linux-2.6.32.48/net/ipv4/netfilter/arp_tables.c 2011-11-08 19:02:43.000000000 -0500
76084+++ linux-2.6.32.48/net/ipv4/netfilter/arp_tables.c 2011-11-15 19:59:43.000000000 -0500
76085@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
76086 private = &tmp;
76087 }
76088 #endif
76089+ memset(&info, 0, sizeof(info));
76090 info.valid_hooks = t->valid_hooks;
76091 memcpy(info.hook_entry, private->hook_entry,
76092 sizeof(info.hook_entry));
76093diff -urNp linux-2.6.32.48/net/ipv4/netfilter/ip_queue.c linux-2.6.32.48/net/ipv4/netfilter/ip_queue.c
76094--- linux-2.6.32.48/net/ipv4/netfilter/ip_queue.c 2011-11-08 19:02:43.000000000 -0500
76095+++ linux-2.6.32.48/net/ipv4/netfilter/ip_queue.c 2011-11-15 19:59:43.000000000 -0500
76096@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
76097
76098 if (v->data_len < sizeof(*user_iph))
76099 return 0;
76100+ if (v->data_len > 65535)
76101+ return -EMSGSIZE;
76102+
76103 diff = v->data_len - e->skb->len;
76104 if (diff < 0) {
76105 if (pskb_trim(e->skb, v->data_len))
76106@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
76107 static inline void
76108 __ipq_rcv_skb(struct sk_buff *skb)
76109 {
76110- int status, type, pid, flags, nlmsglen, skblen;
76111+ int status, type, pid, flags;
76112+ unsigned int nlmsglen, skblen;
76113 struct nlmsghdr *nlh;
76114
76115 skblen = skb->len;
76116diff -urNp linux-2.6.32.48/net/ipv4/netfilter/ip_tables.c linux-2.6.32.48/net/ipv4/netfilter/ip_tables.c
76117--- linux-2.6.32.48/net/ipv4/netfilter/ip_tables.c 2011-11-08 19:02:43.000000000 -0500
76118+++ linux-2.6.32.48/net/ipv4/netfilter/ip_tables.c 2011-11-15 19:59:43.000000000 -0500
76119@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
76120 private = &tmp;
76121 }
76122 #endif
76123+ memset(&info, 0, sizeof(info));
76124 info.valid_hooks = t->valid_hooks;
76125 memcpy(info.hook_entry, private->hook_entry,
76126 sizeof(info.hook_entry));
76127diff -urNp linux-2.6.32.48/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.48/net/ipv4/netfilter/nf_nat_snmp_basic.c
76128--- linux-2.6.32.48/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-08 19:02:43.000000000 -0500
76129+++ linux-2.6.32.48/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-15 19:59:43.000000000 -0500
76130@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
76131
76132 *len = 0;
76133
76134- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
76135+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
76136 if (*octets == NULL) {
76137 if (net_ratelimit())
76138 printk("OOM in bsalg (%d)\n", __LINE__);
76139diff -urNp linux-2.6.32.48/net/ipv4/raw.c linux-2.6.32.48/net/ipv4/raw.c
76140--- linux-2.6.32.48/net/ipv4/raw.c 2011-11-08 19:02:43.000000000 -0500
76141+++ linux-2.6.32.48/net/ipv4/raw.c 2011-11-15 19:59:43.000000000 -0500
76142@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
76143 /* Charge it to the socket. */
76144
76145 if (sock_queue_rcv_skb(sk, skb) < 0) {
76146- atomic_inc(&sk->sk_drops);
76147+ atomic_inc_unchecked(&sk->sk_drops);
76148 kfree_skb(skb);
76149 return NET_RX_DROP;
76150 }
76151@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
76152 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76153 {
76154 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76155- atomic_inc(&sk->sk_drops);
76156+ atomic_inc_unchecked(&sk->sk_drops);
76157 kfree_skb(skb);
76158 return NET_RX_DROP;
76159 }
76160@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
76161
76162 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76163 {
76164+ struct icmp_filter filter;
76165+
76166+ if (optlen < 0)
76167+ return -EINVAL;
76168 if (optlen > sizeof(struct icmp_filter))
76169 optlen = sizeof(struct icmp_filter);
76170- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76171+ if (copy_from_user(&filter, optval, optlen))
76172 return -EFAULT;
76173+ raw_sk(sk)->filter = filter;
76174+
76175 return 0;
76176 }
76177
76178 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76179 {
76180 int len, ret = -EFAULT;
76181+ struct icmp_filter filter;
76182
76183 if (get_user(len, optlen))
76184 goto out;
76185@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
76186 if (len > sizeof(struct icmp_filter))
76187 len = sizeof(struct icmp_filter);
76188 ret = -EFAULT;
76189- if (put_user(len, optlen) ||
76190- copy_to_user(optval, &raw_sk(sk)->filter, len))
76191+ filter = raw_sk(sk)->filter;
76192+ if (put_user(len, optlen) || len > sizeof filter ||
76193+ copy_to_user(optval, &filter, len))
76194 goto out;
76195 ret = 0;
76196 out: return ret;
76197@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
76198 sk_wmem_alloc_get(sp),
76199 sk_rmem_alloc_get(sp),
76200 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76201- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76202+ atomic_read(&sp->sk_refcnt),
76203+#ifdef CONFIG_GRKERNSEC_HIDESYM
76204+ NULL,
76205+#else
76206+ sp,
76207+#endif
76208+ atomic_read_unchecked(&sp->sk_drops));
76209 }
76210
76211 static int raw_seq_show(struct seq_file *seq, void *v)
76212diff -urNp linux-2.6.32.48/net/ipv4/route.c linux-2.6.32.48/net/ipv4/route.c
76213--- linux-2.6.32.48/net/ipv4/route.c 2011-11-08 19:02:43.000000000 -0500
76214+++ linux-2.6.32.48/net/ipv4/route.c 2011-11-15 19:59:43.000000000 -0500
76215@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
76216
76217 static inline int rt_genid(struct net *net)
76218 {
76219- return atomic_read(&net->ipv4.rt_genid);
76220+ return atomic_read_unchecked(&net->ipv4.rt_genid);
76221 }
76222
76223 #ifdef CONFIG_PROC_FS
76224@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
76225 unsigned char shuffle;
76226
76227 get_random_bytes(&shuffle, sizeof(shuffle));
76228- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76229+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
76230 }
76231
76232 /*
76233@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
76234
76235 static __net_init int rt_secret_timer_init(struct net *net)
76236 {
76237- atomic_set(&net->ipv4.rt_genid,
76238+ atomic_set_unchecked(&net->ipv4.rt_genid,
76239 (int) ((num_physpages ^ (num_physpages>>8)) ^
76240 (jiffies ^ (jiffies >> 7))));
76241
76242diff -urNp linux-2.6.32.48/net/ipv4/tcp.c linux-2.6.32.48/net/ipv4/tcp.c
76243--- linux-2.6.32.48/net/ipv4/tcp.c 2011-11-08 19:02:43.000000000 -0500
76244+++ linux-2.6.32.48/net/ipv4/tcp.c 2011-11-15 19:59:43.000000000 -0500
76245@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
76246 int val;
76247 int err = 0;
76248
76249+ pax_track_stack();
76250+
76251 /* This is a string value all the others are int's */
76252 if (optname == TCP_CONGESTION) {
76253 char name[TCP_CA_NAME_MAX];
76254@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
76255 struct tcp_sock *tp = tcp_sk(sk);
76256 int val, len;
76257
76258+ pax_track_stack();
76259+
76260 if (get_user(len, optlen))
76261 return -EFAULT;
76262
76263diff -urNp linux-2.6.32.48/net/ipv4/tcp_ipv4.c linux-2.6.32.48/net/ipv4/tcp_ipv4.c
76264--- linux-2.6.32.48/net/ipv4/tcp_ipv4.c 2011-11-08 19:02:43.000000000 -0500
76265+++ linux-2.6.32.48/net/ipv4/tcp_ipv4.c 2011-11-15 19:59:43.000000000 -0500
76266@@ -85,6 +85,9 @@
76267 int sysctl_tcp_tw_reuse __read_mostly;
76268 int sysctl_tcp_low_latency __read_mostly;
76269
76270+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76271+extern int grsec_enable_blackhole;
76272+#endif
76273
76274 #ifdef CONFIG_TCP_MD5SIG
76275 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
76276@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
76277 return 0;
76278
76279 reset:
76280+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76281+ if (!grsec_enable_blackhole)
76282+#endif
76283 tcp_v4_send_reset(rsk, skb);
76284 discard:
76285 kfree_skb(skb);
76286@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
76287 TCP_SKB_CB(skb)->sacked = 0;
76288
76289 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76290- if (!sk)
76291+ if (!sk) {
76292+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76293+ ret = 1;
76294+#endif
76295 goto no_tcp_socket;
76296+ }
76297
76298 process:
76299- if (sk->sk_state == TCP_TIME_WAIT)
76300+ if (sk->sk_state == TCP_TIME_WAIT) {
76301+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76302+ ret = 2;
76303+#endif
76304 goto do_time_wait;
76305+ }
76306
76307 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
76308 goto discard_and_relse;
76309@@ -1651,6 +1665,10 @@ no_tcp_socket:
76310 bad_packet:
76311 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76312 } else {
76313+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76314+ if (!grsec_enable_blackhole || (ret == 1 &&
76315+ (skb->dev->flags & IFF_LOOPBACK)))
76316+#endif
76317 tcp_v4_send_reset(NULL, skb);
76318 }
76319
76320@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
76321 0, /* non standard timer */
76322 0, /* open_requests have no inode */
76323 atomic_read(&sk->sk_refcnt),
76324+#ifdef CONFIG_GRKERNSEC_HIDESYM
76325+ NULL,
76326+#else
76327 req,
76328+#endif
76329 len);
76330 }
76331
76332@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
76333 sock_i_uid(sk),
76334 icsk->icsk_probes_out,
76335 sock_i_ino(sk),
76336- atomic_read(&sk->sk_refcnt), sk,
76337+ atomic_read(&sk->sk_refcnt),
76338+#ifdef CONFIG_GRKERNSEC_HIDESYM
76339+ NULL,
76340+#else
76341+ sk,
76342+#endif
76343 jiffies_to_clock_t(icsk->icsk_rto),
76344 jiffies_to_clock_t(icsk->icsk_ack.ato),
76345 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
76346@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
76347 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
76348 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76349 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76350- atomic_read(&tw->tw_refcnt), tw, len);
76351+ atomic_read(&tw->tw_refcnt),
76352+#ifdef CONFIG_GRKERNSEC_HIDESYM
76353+ NULL,
76354+#else
76355+ tw,
76356+#endif
76357+ len);
76358 }
76359
76360 #define TMPSZ 150
76361diff -urNp linux-2.6.32.48/net/ipv4/tcp_minisocks.c linux-2.6.32.48/net/ipv4/tcp_minisocks.c
76362--- linux-2.6.32.48/net/ipv4/tcp_minisocks.c 2011-11-08 19:02:43.000000000 -0500
76363+++ linux-2.6.32.48/net/ipv4/tcp_minisocks.c 2011-11-15 19:59:43.000000000 -0500
76364@@ -26,6 +26,10 @@
76365 #include <net/inet_common.h>
76366 #include <net/xfrm.h>
76367
76368+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76369+extern int grsec_enable_blackhole;
76370+#endif
76371+
76372 #ifdef CONFIG_SYSCTL
76373 #define SYNC_INIT 0 /* let the user enable it */
76374 #else
76375@@ -672,6 +676,10 @@ listen_overflow:
76376
76377 embryonic_reset:
76378 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
76379+
76380+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76381+ if (!grsec_enable_blackhole)
76382+#endif
76383 if (!(flg & TCP_FLAG_RST))
76384 req->rsk_ops->send_reset(sk, skb);
76385
76386diff -urNp linux-2.6.32.48/net/ipv4/tcp_output.c linux-2.6.32.48/net/ipv4/tcp_output.c
76387--- linux-2.6.32.48/net/ipv4/tcp_output.c 2011-11-08 19:02:43.000000000 -0500
76388+++ linux-2.6.32.48/net/ipv4/tcp_output.c 2011-11-15 19:59:43.000000000 -0500
76389@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
76390 __u8 *md5_hash_location;
76391 int mss;
76392
76393+ pax_track_stack();
76394+
76395 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
76396 if (skb == NULL)
76397 return NULL;
76398diff -urNp linux-2.6.32.48/net/ipv4/tcp_probe.c linux-2.6.32.48/net/ipv4/tcp_probe.c
76399--- linux-2.6.32.48/net/ipv4/tcp_probe.c 2011-11-08 19:02:43.000000000 -0500
76400+++ linux-2.6.32.48/net/ipv4/tcp_probe.c 2011-11-15 19:59:43.000000000 -0500
76401@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
76402 if (cnt + width >= len)
76403 break;
76404
76405- if (copy_to_user(buf + cnt, tbuf, width))
76406+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
76407 return -EFAULT;
76408 cnt += width;
76409 }
76410diff -urNp linux-2.6.32.48/net/ipv4/tcp_timer.c linux-2.6.32.48/net/ipv4/tcp_timer.c
76411--- linux-2.6.32.48/net/ipv4/tcp_timer.c 2011-11-08 19:02:43.000000000 -0500
76412+++ linux-2.6.32.48/net/ipv4/tcp_timer.c 2011-11-15 19:59:43.000000000 -0500
76413@@ -21,6 +21,10 @@
76414 #include <linux/module.h>
76415 #include <net/tcp.h>
76416
76417+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76418+extern int grsec_lastack_retries;
76419+#endif
76420+
76421 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76422 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76423 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
76424@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
76425 }
76426 }
76427
76428+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76429+ if ((sk->sk_state == TCP_LAST_ACK) &&
76430+ (grsec_lastack_retries > 0) &&
76431+ (grsec_lastack_retries < retry_until))
76432+ retry_until = grsec_lastack_retries;
76433+#endif
76434+
76435 if (retransmits_timed_out(sk, retry_until)) {
76436 /* Has it gone just too far? */
76437 tcp_write_err(sk);
76438diff -urNp linux-2.6.32.48/net/ipv4/udp.c linux-2.6.32.48/net/ipv4/udp.c
76439--- linux-2.6.32.48/net/ipv4/udp.c 2011-11-08 19:02:43.000000000 -0500
76440+++ linux-2.6.32.48/net/ipv4/udp.c 2011-11-15 19:59:43.000000000 -0500
76441@@ -86,6 +86,7 @@
76442 #include <linux/types.h>
76443 #include <linux/fcntl.h>
76444 #include <linux/module.h>
76445+#include <linux/security.h>
76446 #include <linux/socket.h>
76447 #include <linux/sockios.h>
76448 #include <linux/igmp.h>
76449@@ -106,6 +107,10 @@
76450 #include <net/xfrm.h>
76451 #include "udp_impl.h"
76452
76453+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76454+extern int grsec_enable_blackhole;
76455+#endif
76456+
76457 struct udp_table udp_table;
76458 EXPORT_SYMBOL(udp_table);
76459
76460@@ -371,6 +376,9 @@ found:
76461 return s;
76462 }
76463
76464+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76465+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76466+
76467 /*
76468 * This routine is called by the ICMP module when it gets some
76469 * sort of error condition. If err < 0 then the socket should
76470@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
76471 dport = usin->sin_port;
76472 if (dport == 0)
76473 return -EINVAL;
76474+
76475+ err = gr_search_udp_sendmsg(sk, usin);
76476+ if (err)
76477+ return err;
76478 } else {
76479 if (sk->sk_state != TCP_ESTABLISHED)
76480 return -EDESTADDRREQ;
76481+
76482+ err = gr_search_udp_sendmsg(sk, NULL);
76483+ if (err)
76484+ return err;
76485+
76486 daddr = inet->daddr;
76487 dport = inet->dport;
76488 /* Open fast path for connected socket.
76489@@ -945,6 +962,10 @@ try_again:
76490 if (!skb)
76491 goto out;
76492
76493+ err = gr_search_udp_recvmsg(sk, skb);
76494+ if (err)
76495+ goto out_free;
76496+
76497 ulen = skb->len - sizeof(struct udphdr);
76498 copied = len;
76499 if (copied > ulen)
76500@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
76501 if (rc == -ENOMEM) {
76502 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76503 is_udplite);
76504- atomic_inc(&sk->sk_drops);
76505+ atomic_inc_unchecked(&sk->sk_drops);
76506 }
76507 goto drop;
76508 }
76509@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
76510 goto csum_error;
76511
76512 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76513+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76514+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76515+#endif
76516 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76517
76518 /*
76519@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
76520 sk_wmem_alloc_get(sp),
76521 sk_rmem_alloc_get(sp),
76522 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76523- atomic_read(&sp->sk_refcnt), sp,
76524- atomic_read(&sp->sk_drops), len);
76525+ atomic_read(&sp->sk_refcnt),
76526+#ifdef CONFIG_GRKERNSEC_HIDESYM
76527+ NULL,
76528+#else
76529+ sp,
76530+#endif
76531+ atomic_read_unchecked(&sp->sk_drops), len);
76532 }
76533
76534 int udp4_seq_show(struct seq_file *seq, void *v)
76535diff -urNp linux-2.6.32.48/net/ipv6/addrconf.c linux-2.6.32.48/net/ipv6/addrconf.c
76536--- linux-2.6.32.48/net/ipv6/addrconf.c 2011-11-08 19:02:43.000000000 -0500
76537+++ linux-2.6.32.48/net/ipv6/addrconf.c 2011-11-15 19:59:43.000000000 -0500
76538@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net
76539 p.iph.ihl = 5;
76540 p.iph.protocol = IPPROTO_IPV6;
76541 p.iph.ttl = 64;
76542- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76543+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76544
76545 if (ops->ndo_do_ioctl) {
76546 mm_segment_t oldfs = get_fs();
76547diff -urNp linux-2.6.32.48/net/ipv6/inet6_connection_sock.c linux-2.6.32.48/net/ipv6/inet6_connection_sock.c
76548--- linux-2.6.32.48/net/ipv6/inet6_connection_sock.c 2011-11-08 19:02:43.000000000 -0500
76549+++ linux-2.6.32.48/net/ipv6/inet6_connection_sock.c 2011-11-15 19:59:43.000000000 -0500
76550@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
76551 #ifdef CONFIG_XFRM
76552 {
76553 struct rt6_info *rt = (struct rt6_info *)dst;
76554- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76555+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76556 }
76557 #endif
76558 }
76559@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
76560 #ifdef CONFIG_XFRM
76561 if (dst) {
76562 struct rt6_info *rt = (struct rt6_info *)dst;
76563- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76564+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76565 sk->sk_dst_cache = NULL;
76566 dst_release(dst);
76567 dst = NULL;
76568diff -urNp linux-2.6.32.48/net/ipv6/inet6_hashtables.c linux-2.6.32.48/net/ipv6/inet6_hashtables.c
76569--- linux-2.6.32.48/net/ipv6/inet6_hashtables.c 2011-11-08 19:02:43.000000000 -0500
76570+++ linux-2.6.32.48/net/ipv6/inet6_hashtables.c 2011-11-15 19:59:43.000000000 -0500
76571@@ -119,7 +119,7 @@ out:
76572 }
76573 EXPORT_SYMBOL(__inet6_lookup_established);
76574
76575-static int inline compute_score(struct sock *sk, struct net *net,
76576+static inline int compute_score(struct sock *sk, struct net *net,
76577 const unsigned short hnum,
76578 const struct in6_addr *daddr,
76579 const int dif)
76580diff -urNp linux-2.6.32.48/net/ipv6/ipv6_sockglue.c linux-2.6.32.48/net/ipv6/ipv6_sockglue.c
76581--- linux-2.6.32.48/net/ipv6/ipv6_sockglue.c 2011-11-08 19:02:43.000000000 -0500
76582+++ linux-2.6.32.48/net/ipv6/ipv6_sockglue.c 2011-11-15 19:59:43.000000000 -0500
76583@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
76584 int val, valbool;
76585 int retv = -ENOPROTOOPT;
76586
76587+ pax_track_stack();
76588+
76589 if (optval == NULL)
76590 val=0;
76591 else {
76592@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
76593 int len;
76594 int val;
76595
76596+ pax_track_stack();
76597+
76598 if (ip6_mroute_opt(optname))
76599 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
76600
76601@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct soc
76602 if (sk->sk_type != SOCK_STREAM)
76603 return -ENOPROTOOPT;
76604
76605- msg.msg_control = optval;
76606+ msg.msg_control = (void __force_kernel *)optval;
76607 msg.msg_controllen = len;
76608 msg.msg_flags = 0;
76609
76610diff -urNp linux-2.6.32.48/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.48/net/ipv6/netfilter/ip6_queue.c
76611--- linux-2.6.32.48/net/ipv6/netfilter/ip6_queue.c 2011-11-08 19:02:43.000000000 -0500
76612+++ linux-2.6.32.48/net/ipv6/netfilter/ip6_queue.c 2011-11-15 19:59:43.000000000 -0500
76613@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
76614
76615 if (v->data_len < sizeof(*user_iph))
76616 return 0;
76617+ if (v->data_len > 65535)
76618+ return -EMSGSIZE;
76619+
76620 diff = v->data_len - e->skb->len;
76621 if (diff < 0) {
76622 if (pskb_trim(e->skb, v->data_len))
76623@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
76624 static inline void
76625 __ipq_rcv_skb(struct sk_buff *skb)
76626 {
76627- int status, type, pid, flags, nlmsglen, skblen;
76628+ int status, type, pid, flags;
76629+ unsigned int nlmsglen, skblen;
76630 struct nlmsghdr *nlh;
76631
76632 skblen = skb->len;
76633diff -urNp linux-2.6.32.48/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.48/net/ipv6/netfilter/ip6_tables.c
76634--- linux-2.6.32.48/net/ipv6/netfilter/ip6_tables.c 2011-11-08 19:02:43.000000000 -0500
76635+++ linux-2.6.32.48/net/ipv6/netfilter/ip6_tables.c 2011-11-15 19:59:43.000000000 -0500
76636@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
76637 private = &tmp;
76638 }
76639 #endif
76640+ memset(&info, 0, sizeof(info));
76641 info.valid_hooks = t->valid_hooks;
76642 memcpy(info.hook_entry, private->hook_entry,
76643 sizeof(info.hook_entry));
76644diff -urNp linux-2.6.32.48/net/ipv6/raw.c linux-2.6.32.48/net/ipv6/raw.c
76645--- linux-2.6.32.48/net/ipv6/raw.c 2011-11-08 19:02:43.000000000 -0500
76646+++ linux-2.6.32.48/net/ipv6/raw.c 2011-11-15 19:59:43.000000000 -0500
76647@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
76648 {
76649 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
76650 skb_checksum_complete(skb)) {
76651- atomic_inc(&sk->sk_drops);
76652+ atomic_inc_unchecked(&sk->sk_drops);
76653 kfree_skb(skb);
76654 return NET_RX_DROP;
76655 }
76656
76657 /* Charge it to the socket. */
76658 if (sock_queue_rcv_skb(sk,skb)<0) {
76659- atomic_inc(&sk->sk_drops);
76660+ atomic_inc_unchecked(&sk->sk_drops);
76661 kfree_skb(skb);
76662 return NET_RX_DROP;
76663 }
76664@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
76665 struct raw6_sock *rp = raw6_sk(sk);
76666
76667 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76668- atomic_inc(&sk->sk_drops);
76669+ atomic_inc_unchecked(&sk->sk_drops);
76670 kfree_skb(skb);
76671 return NET_RX_DROP;
76672 }
76673@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
76674
76675 if (inet->hdrincl) {
76676 if (skb_checksum_complete(skb)) {
76677- atomic_inc(&sk->sk_drops);
76678+ atomic_inc_unchecked(&sk->sk_drops);
76679 kfree_skb(skb);
76680 return NET_RX_DROP;
76681 }
76682@@ -518,7 +518,7 @@ csum_copy_err:
76683 as some normal condition.
76684 */
76685 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
76686- atomic_inc(&sk->sk_drops);
76687+ atomic_inc_unchecked(&sk->sk_drops);
76688 goto out;
76689 }
76690
76691@@ -600,7 +600,7 @@ out:
76692 return err;
76693 }
76694
76695-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76696+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76697 struct flowi *fl, struct rt6_info *rt,
76698 unsigned int flags)
76699 {
76700@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
76701 u16 proto;
76702 int err;
76703
76704+ pax_track_stack();
76705+
76706 /* Rough check on arithmetic overflow,
76707 better check is made in ip6_append_data().
76708 */
76709@@ -916,12 +918,17 @@ do_confirm:
76710 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76711 char __user *optval, int optlen)
76712 {
76713+ struct icmp6_filter filter;
76714+
76715 switch (optname) {
76716 case ICMPV6_FILTER:
76717+ if (optlen < 0)
76718+ return -EINVAL;
76719 if (optlen > sizeof(struct icmp6_filter))
76720 optlen = sizeof(struct icmp6_filter);
76721- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76722+ if (copy_from_user(&filter, optval, optlen))
76723 return -EFAULT;
76724+ raw6_sk(sk)->filter = filter;
76725 return 0;
76726 default:
76727 return -ENOPROTOOPT;
76728@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
76729 char __user *optval, int __user *optlen)
76730 {
76731 int len;
76732+ struct icmp6_filter filter;
76733
76734 switch (optname) {
76735 case ICMPV6_FILTER:
76736@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
76737 len = sizeof(struct icmp6_filter);
76738 if (put_user(len, optlen))
76739 return -EFAULT;
76740- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76741+ filter = raw6_sk(sk)->filter;
76742+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
76743 return -EFAULT;
76744 return 0;
76745 default:
76746@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
76747 0, 0L, 0,
76748 sock_i_uid(sp), 0,
76749 sock_i_ino(sp),
76750- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76751+ atomic_read(&sp->sk_refcnt),
76752+#ifdef CONFIG_GRKERNSEC_HIDESYM
76753+ NULL,
76754+#else
76755+ sp,
76756+#endif
76757+ atomic_read_unchecked(&sp->sk_drops));
76758 }
76759
76760 static int raw6_seq_show(struct seq_file *seq, void *v)
76761diff -urNp linux-2.6.32.48/net/ipv6/tcp_ipv6.c linux-2.6.32.48/net/ipv6/tcp_ipv6.c
76762--- linux-2.6.32.48/net/ipv6/tcp_ipv6.c 2011-11-08 19:02:43.000000000 -0500
76763+++ linux-2.6.32.48/net/ipv6/tcp_ipv6.c 2011-11-15 19:59:43.000000000 -0500
76764@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
76765 }
76766 #endif
76767
76768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76769+extern int grsec_enable_blackhole;
76770+#endif
76771+
76772 static void tcp_v6_hash(struct sock *sk)
76773 {
76774 if (sk->sk_state != TCP_CLOSE) {
76775@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
76776 return 0;
76777
76778 reset:
76779+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76780+ if (!grsec_enable_blackhole)
76781+#endif
76782 tcp_v6_send_reset(sk, skb);
76783 discard:
76784 if (opt_skb)
76785@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
76786 TCP_SKB_CB(skb)->sacked = 0;
76787
76788 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76789- if (!sk)
76790+ if (!sk) {
76791+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76792+ ret = 1;
76793+#endif
76794 goto no_tcp_socket;
76795+ }
76796
76797 process:
76798- if (sk->sk_state == TCP_TIME_WAIT)
76799+ if (sk->sk_state == TCP_TIME_WAIT) {
76800+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76801+ ret = 2;
76802+#endif
76803 goto do_time_wait;
76804+ }
76805
76806 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
76807 goto discard_and_relse;
76808@@ -1701,6 +1716,10 @@ no_tcp_socket:
76809 bad_packet:
76810 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76811 } else {
76812+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76813+ if (!grsec_enable_blackhole || (ret == 1 &&
76814+ (skb->dev->flags & IFF_LOOPBACK)))
76815+#endif
76816 tcp_v6_send_reset(NULL, skb);
76817 }
76818
76819@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
76820 uid,
76821 0, /* non standard timer */
76822 0, /* open_requests have no inode */
76823- 0, req);
76824+ 0,
76825+#ifdef CONFIG_GRKERNSEC_HIDESYM
76826+ NULL
76827+#else
76828+ req
76829+#endif
76830+ );
76831 }
76832
76833 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76834@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
76835 sock_i_uid(sp),
76836 icsk->icsk_probes_out,
76837 sock_i_ino(sp),
76838- atomic_read(&sp->sk_refcnt), sp,
76839+ atomic_read(&sp->sk_refcnt),
76840+#ifdef CONFIG_GRKERNSEC_HIDESYM
76841+ NULL,
76842+#else
76843+ sp,
76844+#endif
76845 jiffies_to_clock_t(icsk->icsk_rto),
76846 jiffies_to_clock_t(icsk->icsk_ack.ato),
76847 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76848@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
76849 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76850 tw->tw_substate, 0, 0,
76851 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76852- atomic_read(&tw->tw_refcnt), tw);
76853+ atomic_read(&tw->tw_refcnt),
76854+#ifdef CONFIG_GRKERNSEC_HIDESYM
76855+ NULL
76856+#else
76857+ tw
76858+#endif
76859+ );
76860 }
76861
76862 static int tcp6_seq_show(struct seq_file *seq, void *v)
76863diff -urNp linux-2.6.32.48/net/ipv6/udp.c linux-2.6.32.48/net/ipv6/udp.c
76864--- linux-2.6.32.48/net/ipv6/udp.c 2011-11-08 19:02:43.000000000 -0500
76865+++ linux-2.6.32.48/net/ipv6/udp.c 2011-11-15 19:59:43.000000000 -0500
76866@@ -49,6 +49,10 @@
76867 #include <linux/seq_file.h>
76868 #include "udp_impl.h"
76869
76870+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76871+extern int grsec_enable_blackhole;
76872+#endif
76873+
76874 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76875 {
76876 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76877@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
76878 if (rc == -ENOMEM) {
76879 UDP6_INC_STATS_BH(sock_net(sk),
76880 UDP_MIB_RCVBUFERRORS, is_udplite);
76881- atomic_inc(&sk->sk_drops);
76882+ atomic_inc_unchecked(&sk->sk_drops);
76883 }
76884 goto drop;
76885 }
76886@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
76887 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76888 proto == IPPROTO_UDPLITE);
76889
76890+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76891+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76892+#endif
76893 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
76894
76895 kfree_skb(skb);
76896@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
76897 0, 0L, 0,
76898 sock_i_uid(sp), 0,
76899 sock_i_ino(sp),
76900- atomic_read(&sp->sk_refcnt), sp,
76901- atomic_read(&sp->sk_drops));
76902+ atomic_read(&sp->sk_refcnt),
76903+#ifdef CONFIG_GRKERNSEC_HIDESYM
76904+ NULL,
76905+#else
76906+ sp,
76907+#endif
76908+ atomic_read_unchecked(&sp->sk_drops));
76909 }
76910
76911 int udp6_seq_show(struct seq_file *seq, void *v)
76912diff -urNp linux-2.6.32.48/net/irda/ircomm/ircomm_tty.c linux-2.6.32.48/net/irda/ircomm/ircomm_tty.c
76913--- linux-2.6.32.48/net/irda/ircomm/ircomm_tty.c 2011-11-08 19:02:43.000000000 -0500
76914+++ linux-2.6.32.48/net/irda/ircomm/ircomm_tty.c 2011-11-15 19:59:43.000000000 -0500
76915@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
76916 add_wait_queue(&self->open_wait, &wait);
76917
76918 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76919- __FILE__,__LINE__, tty->driver->name, self->open_count );
76920+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76921
76922 /* As far as I can see, we protect open_count - Jean II */
76923 spin_lock_irqsave(&self->spinlock, flags);
76924 if (!tty_hung_up_p(filp)) {
76925 extra_count = 1;
76926- self->open_count--;
76927+ local_dec(&self->open_count);
76928 }
76929 spin_unlock_irqrestore(&self->spinlock, flags);
76930- self->blocked_open++;
76931+ local_inc(&self->blocked_open);
76932
76933 while (1) {
76934 if (tty->termios->c_cflag & CBAUD) {
76935@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
76936 }
76937
76938 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76939- __FILE__,__LINE__, tty->driver->name, self->open_count );
76940+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76941
76942 schedule();
76943 }
76944@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
76945 if (extra_count) {
76946 /* ++ is not atomic, so this should be protected - Jean II */
76947 spin_lock_irqsave(&self->spinlock, flags);
76948- self->open_count++;
76949+ local_inc(&self->open_count);
76950 spin_unlock_irqrestore(&self->spinlock, flags);
76951 }
76952- self->blocked_open--;
76953+ local_dec(&self->blocked_open);
76954
76955 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76956- __FILE__,__LINE__, tty->driver->name, self->open_count);
76957+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76958
76959 if (!retval)
76960 self->flags |= ASYNC_NORMAL_ACTIVE;
76961@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
76962 }
76963 /* ++ is not atomic, so this should be protected - Jean II */
76964 spin_lock_irqsave(&self->spinlock, flags);
76965- self->open_count++;
76966+ local_inc(&self->open_count);
76967
76968 tty->driver_data = self;
76969 self->tty = tty;
76970 spin_unlock_irqrestore(&self->spinlock, flags);
76971
76972 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76973- self->line, self->open_count);
76974+ self->line, local_read(&self->open_count));
76975
76976 /* Not really used by us, but lets do it anyway */
76977 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76978@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
76979 return;
76980 }
76981
76982- if ((tty->count == 1) && (self->open_count != 1)) {
76983+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76984 /*
76985 * Uh, oh. tty->count is 1, which means that the tty
76986 * structure will be freed. state->count should always
76987@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
76988 */
76989 IRDA_DEBUG(0, "%s(), bad serial port count; "
76990 "tty->count is 1, state->count is %d\n", __func__ ,
76991- self->open_count);
76992- self->open_count = 1;
76993+ local_read(&self->open_count));
76994+ local_set(&self->open_count, 1);
76995 }
76996
76997- if (--self->open_count < 0) {
76998+ if (local_dec_return(&self->open_count) < 0) {
76999 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77000- __func__, self->line, self->open_count);
77001- self->open_count = 0;
77002+ __func__, self->line, local_read(&self->open_count));
77003+ local_set(&self->open_count, 0);
77004 }
77005- if (self->open_count) {
77006+ if (local_read(&self->open_count)) {
77007 spin_unlock_irqrestore(&self->spinlock, flags);
77008
77009 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
77010@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
77011 tty->closing = 0;
77012 self->tty = NULL;
77013
77014- if (self->blocked_open) {
77015+ if (local_read(&self->blocked_open)) {
77016 if (self->close_delay)
77017 schedule_timeout_interruptible(self->close_delay);
77018 wake_up_interruptible(&self->open_wait);
77019@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
77020 spin_lock_irqsave(&self->spinlock, flags);
77021 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77022 self->tty = NULL;
77023- self->open_count = 0;
77024+ local_set(&self->open_count, 0);
77025 spin_unlock_irqrestore(&self->spinlock, flags);
77026
77027 wake_up_interruptible(&self->open_wait);
77028@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
77029 seq_putc(m, '\n');
77030
77031 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77032- seq_printf(m, "Open count: %d\n", self->open_count);
77033+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
77034 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77035 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77036
77037diff -urNp linux-2.6.32.48/net/iucv/af_iucv.c linux-2.6.32.48/net/iucv/af_iucv.c
77038--- linux-2.6.32.48/net/iucv/af_iucv.c 2011-11-08 19:02:43.000000000 -0500
77039+++ linux-2.6.32.48/net/iucv/af_iucv.c 2011-11-15 19:59:43.000000000 -0500
77040@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
77041
77042 write_lock_bh(&iucv_sk_list.lock);
77043
77044- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77045+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77046 while (__iucv_get_sock_by_name(name)) {
77047 sprintf(name, "%08x",
77048- atomic_inc_return(&iucv_sk_list.autobind_name));
77049+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77050 }
77051
77052 write_unlock_bh(&iucv_sk_list.lock);
77053diff -urNp linux-2.6.32.48/net/key/af_key.c linux-2.6.32.48/net/key/af_key.c
77054--- linux-2.6.32.48/net/key/af_key.c 2011-11-08 19:02:43.000000000 -0500
77055+++ linux-2.6.32.48/net/key/af_key.c 2011-11-15 19:59:43.000000000 -0500
77056@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
77057 struct xfrm_migrate m[XFRM_MAX_DEPTH];
77058 struct xfrm_kmaddress k;
77059
77060+ pax_track_stack();
77061+
77062 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
77063 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
77064 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
77065@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
77066 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
77067 else
77068 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
77069+#ifdef CONFIG_GRKERNSEC_HIDESYM
77070+ NULL,
77071+#else
77072 s,
77073+#endif
77074 atomic_read(&s->sk_refcnt),
77075 sk_rmem_alloc_get(s),
77076 sk_wmem_alloc_get(s),
77077diff -urNp linux-2.6.32.48/net/lapb/lapb_iface.c linux-2.6.32.48/net/lapb/lapb_iface.c
77078--- linux-2.6.32.48/net/lapb/lapb_iface.c 2011-11-08 19:02:43.000000000 -0500
77079+++ linux-2.6.32.48/net/lapb/lapb_iface.c 2011-11-15 19:59:43.000000000 -0500
77080@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
77081 goto out;
77082
77083 lapb->dev = dev;
77084- lapb->callbacks = *callbacks;
77085+ lapb->callbacks = callbacks;
77086
77087 __lapb_insert_cb(lapb);
77088
77089@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
77090
77091 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
77092 {
77093- if (lapb->callbacks.connect_confirmation)
77094- lapb->callbacks.connect_confirmation(lapb->dev, reason);
77095+ if (lapb->callbacks->connect_confirmation)
77096+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
77097 }
77098
77099 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
77100 {
77101- if (lapb->callbacks.connect_indication)
77102- lapb->callbacks.connect_indication(lapb->dev, reason);
77103+ if (lapb->callbacks->connect_indication)
77104+ lapb->callbacks->connect_indication(lapb->dev, reason);
77105 }
77106
77107 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
77108 {
77109- if (lapb->callbacks.disconnect_confirmation)
77110- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
77111+ if (lapb->callbacks->disconnect_confirmation)
77112+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
77113 }
77114
77115 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
77116 {
77117- if (lapb->callbacks.disconnect_indication)
77118- lapb->callbacks.disconnect_indication(lapb->dev, reason);
77119+ if (lapb->callbacks->disconnect_indication)
77120+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
77121 }
77122
77123 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
77124 {
77125- if (lapb->callbacks.data_indication)
77126- return lapb->callbacks.data_indication(lapb->dev, skb);
77127+ if (lapb->callbacks->data_indication)
77128+ return lapb->callbacks->data_indication(lapb->dev, skb);
77129
77130 kfree_skb(skb);
77131 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
77132@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
77133 {
77134 int used = 0;
77135
77136- if (lapb->callbacks.data_transmit) {
77137- lapb->callbacks.data_transmit(lapb->dev, skb);
77138+ if (lapb->callbacks->data_transmit) {
77139+ lapb->callbacks->data_transmit(lapb->dev, skb);
77140 used = 1;
77141 }
77142
77143diff -urNp linux-2.6.32.48/net/mac80211/cfg.c linux-2.6.32.48/net/mac80211/cfg.c
77144--- linux-2.6.32.48/net/mac80211/cfg.c 2011-11-08 19:02:43.000000000 -0500
77145+++ linux-2.6.32.48/net/mac80211/cfg.c 2011-11-15 19:59:43.000000000 -0500
77146@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
77147 return err;
77148 }
77149
77150-struct cfg80211_ops mac80211_config_ops = {
77151+const struct cfg80211_ops mac80211_config_ops = {
77152 .add_virtual_intf = ieee80211_add_iface,
77153 .del_virtual_intf = ieee80211_del_iface,
77154 .change_virtual_intf = ieee80211_change_iface,
77155diff -urNp linux-2.6.32.48/net/mac80211/cfg.h linux-2.6.32.48/net/mac80211/cfg.h
77156--- linux-2.6.32.48/net/mac80211/cfg.h 2011-11-08 19:02:43.000000000 -0500
77157+++ linux-2.6.32.48/net/mac80211/cfg.h 2011-11-15 19:59:43.000000000 -0500
77158@@ -4,6 +4,6 @@
77159 #ifndef __CFG_H
77160 #define __CFG_H
77161
77162-extern struct cfg80211_ops mac80211_config_ops;
77163+extern const struct cfg80211_ops mac80211_config_ops;
77164
77165 #endif /* __CFG_H */
77166diff -urNp linux-2.6.32.48/net/mac80211/debugfs_key.c linux-2.6.32.48/net/mac80211/debugfs_key.c
77167--- linux-2.6.32.48/net/mac80211/debugfs_key.c 2011-11-08 19:02:43.000000000 -0500
77168+++ linux-2.6.32.48/net/mac80211/debugfs_key.c 2011-11-15 19:59:43.000000000 -0500
77169@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
77170 size_t count, loff_t *ppos)
77171 {
77172 struct ieee80211_key *key = file->private_data;
77173- int i, res, bufsize = 2 * key->conf.keylen + 2;
77174+ int i, bufsize = 2 * key->conf.keylen + 2;
77175 char *buf = kmalloc(bufsize, GFP_KERNEL);
77176 char *p = buf;
77177+ ssize_t res;
77178+
77179+ if (buf == NULL)
77180+ return -ENOMEM;
77181
77182 for (i = 0; i < key->conf.keylen; i++)
77183 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
77184diff -urNp linux-2.6.32.48/net/mac80211/debugfs_sta.c linux-2.6.32.48/net/mac80211/debugfs_sta.c
77185--- linux-2.6.32.48/net/mac80211/debugfs_sta.c 2011-11-08 19:02:43.000000000 -0500
77186+++ linux-2.6.32.48/net/mac80211/debugfs_sta.c 2011-11-15 19:59:43.000000000 -0500
77187@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
77188 int i;
77189 struct sta_info *sta = file->private_data;
77190
77191+ pax_track_stack();
77192+
77193 spin_lock_bh(&sta->lock);
77194 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
77195 sta->ampdu_mlme.dialog_token_allocator + 1);
77196diff -urNp linux-2.6.32.48/net/mac80211/ieee80211_i.h linux-2.6.32.48/net/mac80211/ieee80211_i.h
77197--- linux-2.6.32.48/net/mac80211/ieee80211_i.h 2011-11-08 19:02:43.000000000 -0500
77198+++ linux-2.6.32.48/net/mac80211/ieee80211_i.h 2011-11-15 19:59:43.000000000 -0500
77199@@ -25,6 +25,7 @@
77200 #include <linux/etherdevice.h>
77201 #include <net/cfg80211.h>
77202 #include <net/mac80211.h>
77203+#include <asm/local.h>
77204 #include "key.h"
77205 #include "sta_info.h"
77206
77207@@ -635,7 +636,7 @@ struct ieee80211_local {
77208 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
77209 spinlock_t queue_stop_reason_lock;
77210
77211- int open_count;
77212+ local_t open_count;
77213 int monitors, cooked_mntrs;
77214 /* number of interfaces with corresponding FIF_ flags */
77215 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
77216diff -urNp linux-2.6.32.48/net/mac80211/iface.c linux-2.6.32.48/net/mac80211/iface.c
77217--- linux-2.6.32.48/net/mac80211/iface.c 2011-11-08 19:02:43.000000000 -0500
77218+++ linux-2.6.32.48/net/mac80211/iface.c 2011-11-15 19:59:43.000000000 -0500
77219@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
77220 break;
77221 }
77222
77223- if (local->open_count == 0) {
77224+ if (local_read(&local->open_count) == 0) {
77225 res = drv_start(local);
77226 if (res)
77227 goto err_del_bss;
77228@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
77229 * Validate the MAC address for this device.
77230 */
77231 if (!is_valid_ether_addr(dev->dev_addr)) {
77232- if (!local->open_count)
77233+ if (!local_read(&local->open_count))
77234 drv_stop(local);
77235 return -EADDRNOTAVAIL;
77236 }
77237@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
77238
77239 hw_reconf_flags |= __ieee80211_recalc_idle(local);
77240
77241- local->open_count++;
77242+ local_inc(&local->open_count);
77243 if (hw_reconf_flags) {
77244 ieee80211_hw_config(local, hw_reconf_flags);
77245 /*
77246@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
77247 err_del_interface:
77248 drv_remove_interface(local, &conf);
77249 err_stop:
77250- if (!local->open_count)
77251+ if (!local_read(&local->open_count))
77252 drv_stop(local);
77253 err_del_bss:
77254 sdata->bss = NULL;
77255@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
77256 WARN_ON(!list_empty(&sdata->u.ap.vlans));
77257 }
77258
77259- local->open_count--;
77260+ local_dec(&local->open_count);
77261
77262 switch (sdata->vif.type) {
77263 case NL80211_IFTYPE_AP_VLAN:
77264@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
77265
77266 ieee80211_recalc_ps(local, -1);
77267
77268- if (local->open_count == 0) {
77269+ if (local_read(&local->open_count) == 0) {
77270 ieee80211_clear_tx_pending(local);
77271 ieee80211_stop_device(local);
77272
77273diff -urNp linux-2.6.32.48/net/mac80211/main.c linux-2.6.32.48/net/mac80211/main.c
77274--- linux-2.6.32.48/net/mac80211/main.c 2011-11-08 19:02:43.000000000 -0500
77275+++ linux-2.6.32.48/net/mac80211/main.c 2011-11-15 19:59:43.000000000 -0500
77276@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
77277 local->hw.conf.power_level = power;
77278 }
77279
77280- if (changed && local->open_count) {
77281+ if (changed && local_read(&local->open_count)) {
77282 ret = drv_config(local, changed);
77283 /*
77284 * Goal:
77285diff -urNp linux-2.6.32.48/net/mac80211/mlme.c linux-2.6.32.48/net/mac80211/mlme.c
77286--- linux-2.6.32.48/net/mac80211/mlme.c 2011-11-08 19:02:43.000000000 -0500
77287+++ linux-2.6.32.48/net/mac80211/mlme.c 2011-11-15 19:59:43.000000000 -0500
77288@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
77289 bool have_higher_than_11mbit = false, newsta = false;
77290 u16 ap_ht_cap_flags;
77291
77292+ pax_track_stack();
77293+
77294 /*
77295 * AssocResp and ReassocResp have identical structure, so process both
77296 * of them in this function.
77297diff -urNp linux-2.6.32.48/net/mac80211/pm.c linux-2.6.32.48/net/mac80211/pm.c
77298--- linux-2.6.32.48/net/mac80211/pm.c 2011-11-08 19:02:43.000000000 -0500
77299+++ linux-2.6.32.48/net/mac80211/pm.c 2011-11-15 19:59:43.000000000 -0500
77300@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
77301 }
77302
77303 /* stop hardware - this must stop RX */
77304- if (local->open_count)
77305+ if (local_read(&local->open_count))
77306 ieee80211_stop_device(local);
77307
77308 local->suspended = true;
77309diff -urNp linux-2.6.32.48/net/mac80211/rate.c linux-2.6.32.48/net/mac80211/rate.c
77310--- linux-2.6.32.48/net/mac80211/rate.c 2011-11-08 19:02:43.000000000 -0500
77311+++ linux-2.6.32.48/net/mac80211/rate.c 2011-11-15 19:59:43.000000000 -0500
77312@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
77313 struct rate_control_ref *ref, *old;
77314
77315 ASSERT_RTNL();
77316- if (local->open_count)
77317+ if (local_read(&local->open_count))
77318 return -EBUSY;
77319
77320 ref = rate_control_alloc(name, local);
77321diff -urNp linux-2.6.32.48/net/mac80211/tx.c linux-2.6.32.48/net/mac80211/tx.c
77322--- linux-2.6.32.48/net/mac80211/tx.c 2011-11-08 19:02:43.000000000 -0500
77323+++ linux-2.6.32.48/net/mac80211/tx.c 2011-11-15 19:59:43.000000000 -0500
77324@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
77325 return cpu_to_le16(dur);
77326 }
77327
77328-static int inline is_ieee80211_device(struct ieee80211_local *local,
77329+static inline int is_ieee80211_device(struct ieee80211_local *local,
77330 struct net_device *dev)
77331 {
77332 return local == wdev_priv(dev->ieee80211_ptr);
77333diff -urNp linux-2.6.32.48/net/mac80211/util.c linux-2.6.32.48/net/mac80211/util.c
77334--- linux-2.6.32.48/net/mac80211/util.c 2011-11-08 19:02:43.000000000 -0500
77335+++ linux-2.6.32.48/net/mac80211/util.c 2011-11-15 19:59:43.000000000 -0500
77336@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
77337 local->resuming = true;
77338
77339 /* restart hardware */
77340- if (local->open_count) {
77341+ if (local_read(&local->open_count)) {
77342 /*
77343 * Upon resume hardware can sometimes be goofy due to
77344 * various platform / driver / bus issues, so restarting
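
All of the mac80211 hunks above apply one conversion: local->open_count changes from a plain int to a local_t, and every reader and writer goes through the local_read/local_inc/local_dec accessors from asm/local.h. A minimal illustration of those accessors follows; the structure and function names are invented for the example and are not part of the patch:

#include <asm/local.h>

struct example_hw {
	local_t open_count;		/* was: int open_count */
};

static int example_open(struct example_hw *hw)
{
	if (local_read(&hw->open_count) == 0) {
		/* first opener: bring the hardware up here */
	}
	local_inc(&hw->open_count);
	return 0;
}

static void example_stop(struct example_hw *hw)
{
	local_dec(&hw->open_count);
	if (local_read(&hw->open_count) == 0) {
		/* last user gone: stop the hardware here */
	}
}
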
77345diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_app.c
77346--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_app.c 2011-11-08 19:02:43.000000000 -0500
77347+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_app.c 2011-11-15 19:59:43.000000000 -0500
77348@@ -564,7 +564,7 @@ static const struct file_operations ip_v
77349 .open = ip_vs_app_open,
77350 .read = seq_read,
77351 .llseek = seq_lseek,
77352- .release = seq_release,
77353+ .release = seq_release_net,
77354 };
77355 #endif
77356
77357diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_conn.c
77358--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_conn.c 2011-11-08 19:02:43.000000000 -0500
77359+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_conn.c 2011-11-15 19:59:43.000000000 -0500
77360@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
77361 /* if the connection is not template and is created
77362 * by sync, preserve the activity flag.
77363 */
77364- cp->flags |= atomic_read(&dest->conn_flags) &
77365+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
77366 (~IP_VS_CONN_F_INACTIVE);
77367 else
77368- cp->flags |= atomic_read(&dest->conn_flags);
77369+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
77370 cp->dest = dest;
77371
77372 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
77373@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
77374 atomic_set(&cp->refcnt, 1);
77375
77376 atomic_set(&cp->n_control, 0);
77377- atomic_set(&cp->in_pkts, 0);
77378+ atomic_set_unchecked(&cp->in_pkts, 0);
77379
77380 atomic_inc(&ip_vs_conn_count);
77381 if (flags & IP_VS_CONN_F_NO_CPORT)
77382@@ -871,7 +871,7 @@ static const struct file_operations ip_v
77383 .open = ip_vs_conn_open,
77384 .read = seq_read,
77385 .llseek = seq_lseek,
77386- .release = seq_release,
77387+ .release = seq_release_net,
77388 };
77389
77390 static const char *ip_vs_origin_name(unsigned flags)
77391@@ -934,7 +934,7 @@ static const struct file_operations ip_v
77392 .open = ip_vs_conn_sync_open,
77393 .read = seq_read,
77394 .llseek = seq_lseek,
77395- .release = seq_release,
77396+ .release = seq_release_net,
77397 };
77398
77399 #endif
77400@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
77401
77402 /* Don't drop the entry if its number of incoming packets is not
77403 located in [0, 8] */
77404- i = atomic_read(&cp->in_pkts);
77405+ i = atomic_read_unchecked(&cp->in_pkts);
77406 if (i > 8 || i < 0) return 0;
77407
77408 if (!todrop_rate[i]) return 0;
77409diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_core.c
77410--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_core.c 2011-11-08 19:02:43.000000000 -0500
77411+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_core.c 2011-11-15 19:59:43.000000000 -0500
77412@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
77413 ret = cp->packet_xmit(skb, cp, pp);
77414 /* do not touch skb anymore */
77415
77416- atomic_inc(&cp->in_pkts);
77417+ atomic_inc_unchecked(&cp->in_pkts);
77418 ip_vs_conn_put(cp);
77419 return ret;
77420 }
77421@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
77422 * Sync connection if it is about to close to
77423 * encourage the standby servers to update the connections timeout
77424 */
77425- pkts = atomic_add_return(1, &cp->in_pkts);
77426+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77427 if (af == AF_INET &&
77428 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
77429 (((cp->protocol != IPPROTO_TCP ||
77430diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_ctl.c
77431--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-08 19:02:43.000000000 -0500
77432+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-15 19:59:43.000000000 -0500
77433@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
77434 ip_vs_rs_hash(dest);
77435 write_unlock_bh(&__ip_vs_rs_lock);
77436 }
77437- atomic_set(&dest->conn_flags, conn_flags);
77438+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
77439
77440 /* bind the service */
77441 if (!dest->svc) {
77442@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
77443 " %-7s %-6d %-10d %-10d\n",
77444 &dest->addr.in6,
77445 ntohs(dest->port),
77446- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77447+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77448 atomic_read(&dest->weight),
77449 atomic_read(&dest->activeconns),
77450 atomic_read(&dest->inactconns));
77451@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
77452 "%-7s %-6d %-10d %-10d\n",
77453 ntohl(dest->addr.ip),
77454 ntohs(dest->port),
77455- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77456+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77457 atomic_read(&dest->weight),
77458 atomic_read(&dest->activeconns),
77459 atomic_read(&dest->inactconns));
77460@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
77461 .open = ip_vs_info_open,
77462 .read = seq_read,
77463 .llseek = seq_lseek,
77464- .release = seq_release_private,
77465+ .release = seq_release_net,
77466 };
77467
77468 #endif
77469@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
77470 .open = ip_vs_stats_seq_open,
77471 .read = seq_read,
77472 .llseek = seq_lseek,
77473- .release = single_release,
77474+ .release = single_release_net,
77475 };
77476
77477 #endif
77478@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
77479
77480 entry.addr = dest->addr.ip;
77481 entry.port = dest->port;
77482- entry.conn_flags = atomic_read(&dest->conn_flags);
77483+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77484 entry.weight = atomic_read(&dest->weight);
77485 entry.u_threshold = dest->u_threshold;
77486 entry.l_threshold = dest->l_threshold;
77487@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
77488 unsigned char arg[128];
77489 int ret = 0;
77490
77491+ pax_track_stack();
77492+
77493 if (!capable(CAP_NET_ADMIN))
77494 return -EPERM;
77495
77496@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
77497 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77498
77499 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77500- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77501+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77502 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77503 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77504 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
77505diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_sync.c
77506--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_sync.c 2011-11-08 19:02:43.000000000 -0500
77507+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_sync.c 2011-11-15 19:59:43.000000000 -0500
77508@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
77509
77510 if (opt)
77511 memcpy(&cp->in_seq, opt, sizeof(*opt));
77512- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
77513+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
77514 cp->state = state;
77515 cp->old_state = cp->state;
77516 /*
77517diff -urNp linux-2.6.32.48/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.48/net/netfilter/ipvs/ip_vs_xmit.c
77518--- linux-2.6.32.48/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-08 19:02:43.000000000 -0500
77519+++ linux-2.6.32.48/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-15 19:59:43.000000000 -0500
77520@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
77521 else
77522 rc = NF_ACCEPT;
77523 /* do not touch skb anymore */
77524- atomic_inc(&cp->in_pkts);
77525+ atomic_inc_unchecked(&cp->in_pkts);
77526 goto out;
77527 }
77528
77529@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
77530 else
77531 rc = NF_ACCEPT;
77532 /* do not touch skb anymore */
77533- atomic_inc(&cp->in_pkts);
77534+ atomic_inc_unchecked(&cp->in_pkts);
77535 goto out;
77536 }
77537
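
The ip_vs hunks above all make the same substitution: the in_pkts packet counter and the conn_flags word become atomic_unchecked_t, and their accesses move to the *_unchecked variants, which under PAX_REFCOUNT are exempt from the overflow detection applied to ordinary atomic_t reference counts. A small illustration of how the checked and unchecked flavours sit side by side; the structure here is hypothetical, and atomic_unchecked_t itself is defined elsewhere in this patch:

#include <asm/atomic.h>

struct example_conn {
	atomic_t		refcnt;		/* real reference count: stays checked */
	atomic_unchecked_t	in_pkts;	/* statistic that may wrap: unchecked */
};

static void example_account_packet(struct example_conn *cp)
{
	atomic_inc(&cp->refcnt);		/* overflow here would be a bug */
	atomic_inc_unchecked(&cp->in_pkts);	/* wrapping here is harmless */

	if (atomic_read_unchecked(&cp->in_pkts) > 8)
		atomic_set_unchecked(&cp->in_pkts, 0);

	atomic_dec(&cp->refcnt);
}
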
77538diff -urNp linux-2.6.32.48/net/netfilter/Kconfig linux-2.6.32.48/net/netfilter/Kconfig
77539--- linux-2.6.32.48/net/netfilter/Kconfig 2011-11-08 19:02:43.000000000 -0500
77540+++ linux-2.6.32.48/net/netfilter/Kconfig 2011-11-15 19:59:43.000000000 -0500
77541@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
77542
77543 To compile it as a module, choose M here. If unsure, say N.
77544
77545+config NETFILTER_XT_MATCH_GRADM
77546+ tristate '"gradm" match support'
77547+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77548+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77549+ ---help---
77550+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
77551+ It is useful when iptables rules are applied early on bootup to
77552+ prevent connections to the machine (except from a trusted host)
77553+ while the RBAC system is disabled.
77554+
77555 config NETFILTER_XT_MATCH_HASHLIMIT
77556 tristate '"hashlimit" match support'
77557 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
77558diff -urNp linux-2.6.32.48/net/netfilter/Makefile linux-2.6.32.48/net/netfilter/Makefile
77559--- linux-2.6.32.48/net/netfilter/Makefile 2011-11-08 19:02:43.000000000 -0500
77560+++ linux-2.6.32.48/net/netfilter/Makefile 2011-11-15 19:59:43.000000000 -0500
77561@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
77562 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
77563 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
77564 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77565+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77566 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77567 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77568 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
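
The Kconfig and Makefile hunks above wire a new xt_gradm match module into the netfilter build; the new source file itself begins further below and is truncated in this scrape. For orientation, a bare-bones sketch of what an xtables match module of this vintage looks like, assuming the 2.6.32-era API in which the match callback receives an xt_match_param. Every name here is hypothetical; this is not the actual xt_gradm.c:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

/* skeleton match: a real match would inspect skb and/or par->matchinfo */
static bool example_mt(const struct sk_buff *skb,
		       const struct xt_match_param *par)
{
	return true;
}

static struct xt_match example_mt_reg __read_mostly = {
	.name     = "example",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = example_mt,
	.me       = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");
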
77569diff -urNp linux-2.6.32.48/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.48/net/netfilter/nf_conntrack_netlink.c
77570--- linux-2.6.32.48/net/netfilter/nf_conntrack_netlink.c 2011-11-08 19:02:43.000000000 -0500
77571+++ linux-2.6.32.48/net/netfilter/nf_conntrack_netlink.c 2011-11-15 19:59:43.000000000 -0500
77572@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
77573 static int
77574 ctnetlink_parse_tuple(const struct nlattr * const cda[],
77575 struct nf_conntrack_tuple *tuple,
77576- enum ctattr_tuple type, u_int8_t l3num)
77577+ enum ctattr_type type, u_int8_t l3num)
77578 {
77579 struct nlattr *tb[CTA_TUPLE_MAX+1];
77580 int err;
77581diff -urNp linux-2.6.32.48/net/netfilter/nfnetlink_log.c linux-2.6.32.48/net/netfilter/nfnetlink_log.c
77582--- linux-2.6.32.48/net/netfilter/nfnetlink_log.c 2011-11-08 19:02:43.000000000 -0500
77583+++ linux-2.6.32.48/net/netfilter/nfnetlink_log.c 2011-11-15 19:59:43.000000000 -0500
77584@@ -68,7 +68,7 @@ struct nfulnl_instance {
77585 };
77586
77587 static DEFINE_RWLOCK(instances_lock);
77588-static atomic_t global_seq;
77589+static atomic_unchecked_t global_seq;
77590
77591 #define INSTANCE_BUCKETS 16
77592 static struct hlist_head instance_table[INSTANCE_BUCKETS];
77593@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
77594 /* global sequence number */
77595 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77596 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77597- htonl(atomic_inc_return(&global_seq)));
77598+ htonl(atomic_inc_return_unchecked(&global_seq)));
77599
77600 if (data_len) {
77601 struct nlattr *nla;
77602diff -urNp linux-2.6.32.48/net/netfilter/xt_gradm.c linux-2.6.32.48/net/netfilter/xt_gradm.c
77603--- linux-2.6.32.48/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
77604+++ linux-2.6.32.48/net/netfilter/xt_gradm.c 2011-11-15 19:59:43.000000000 -0500
77605@@ -0,0 +1,51 @@
77606+/*
77607+ * gradm match for netfilter
77608