test/grsecurity-2.2.2-2.6.32.42-201107062003.patch (grsecurity 2.2.2 against Linux 2.6.32.42, dated 2011-07-06)
1diff -urNp linux-2.6.32.42/arch/alpha/include/asm/elf.h linux-2.6.32.42/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
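
The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values above are entropy widths in bits rather than byte offsets, and PAX_ELF_ET_DYN_BASE is the base used for ET_DYN (PIE) binaries when PaX ASLR is active. A minimal sketch of how such a bit count is typically consumed, with the helper name and the random input being illustrative assumptions rather than code from this patch:

/* Sketch: slide a mapping base by up to 2^delta_bits pages, the way a
 * PAX_DELTA_MMAP_LEN-style width (28 bits on 64-bit Alpha, 14 under
 * ADDR_LIMIT_32BIT above) is applied at page granularity. */
static unsigned long randomize_base(unsigned long base,
                                    unsigned int delta_bits,
                                    unsigned long random_value)
{
        unsigned long delta = random_value & ((1UL << delta_bits) - 1);

        return base + (delta << PAGE_SHIFT);
}
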
18diff -urNp linux-2.6.32.42/arch/alpha/include/asm/pgtable.h linux-2.6.32.42/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
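
With CONFIG_PAX_PAGEEXEC the new *_NOEXEC protections simply add _PAGE_FOE (fault on execute) to the existing Alpha protections; with the option off they alias the plain versions, so code elsewhere in the patch can use the NOEXEC names unconditionally. A heavily simplified illustration of how such protections would be chosen from VMA flags (shared/private split only, ignoring the read/write bits, so this is not the kernel's real protection_map logic):

/* Illustrative only: pick a non-executable protection for a VMA that
 * lacks VM_EXEC; with PAGEEXEC disabled these names fall back to the
 * ordinary, executable protections defined above. */
static pgprot_t noexec_prot_for(unsigned long vm_flags)
{
        if (vm_flags & VM_EXEC)
                return (vm_flags & VM_SHARED) ? PAGE_SHARED : PAGE_COPY;
        return (vm_flags & VM_SHARED) ? PAGE_SHARED_NOEXEC : PAGE_COPY_NOEXEC;
}
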
39diff -urNp linux-2.6.32.42/arch/alpha/kernel/module.c linux-2.6.32.42/arch/alpha/kernel/module.c
40--- linux-2.6.32.42/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.42/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.42/arch/alpha/kernel/osf_sys.c linux-2.6.32.42/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53+++ linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58- if (namelen > 32)
59+ if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63@@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67- if (len > count)
68+ if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72@@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76- if (nbytes < sizeof(*hwrpb))
77+ if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81@@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85+ unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89@@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93- ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94+ ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95+ (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102+ err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106@@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110- if (!vma || addr + len <= vma->vm_start)
111+ if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115@@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119+#ifdef CONFIG_PAX_RANDMMAP
120+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121+#endif
122+
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126@@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131- len, limit);
132+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133+
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
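
Throughout the patch the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap(), which keeps that check but also refuses candidate ranges that would end right below a growing-down stack VMA. A minimal sketch of the idea, using a fixed, illustrative gap size (the real helper is defined in the common mm code of the full patch and is not shown in this excerpt):

/* Sketch: accept addr..addr+len only if it fits below the next VMA,
 * keeping a guard gap when that VMA is a growing-down stack. */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len)
{
        const unsigned long gap = 64UL << PAGE_SHIFT;   /* illustrative size */

        if (!vma)
                return true;
        if (addr + len > vma->vm_start)
                return false;
        if (vma->vm_flags & VM_GROWSDOWN)
                return addr + len + gap <= vma->vm_start;
        return true;
}
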
137diff -urNp linux-2.6.32.42/arch/alpha/mm/fault.c linux-2.6.32.42/arch/alpha/mm/fault.c
138--- linux-2.6.32.42/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139+++ linux-2.6.32.42/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144+#ifdef CONFIG_PAX_PAGEEXEC
145+/*
146+ * PaX: decide what to do with offenders (regs->pc = fault address)
147+ *
148+ * returns 1 when task should be killed
149+ * 2 when patched PLT trampoline was detected
150+ * 3 when unpatched PLT trampoline was detected
151+ */
152+static int pax_handle_fetch_fault(struct pt_regs *regs)
153+{
154+
155+#ifdef CONFIG_PAX_EMUPLT
156+ int err;
157+
158+ do { /* PaX: patched PLT emulation #1 */
159+ unsigned int ldah, ldq, jmp;
160+
161+ err = get_user(ldah, (unsigned int *)regs->pc);
162+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164+
165+ if (err)
166+ break;
167+
168+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170+ jmp == 0x6BFB0000U)
171+ {
172+ unsigned long r27, addr;
173+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175+
176+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177+ err = get_user(r27, (unsigned long *)addr);
178+ if (err)
179+ break;
180+
181+ regs->r27 = r27;
182+ regs->pc = r27;
183+ return 2;
184+ }
185+ } while (0);
186+
187+ do { /* PaX: patched PLT emulation #2 */
188+ unsigned int ldah, lda, br;
189+
190+ err = get_user(ldah, (unsigned int *)regs->pc);
191+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
192+ err |= get_user(br, (unsigned int *)(regs->pc+8));
193+
194+ if (err)
195+ break;
196+
197+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
199+ (br & 0xFFE00000U) == 0xC3E00000U)
200+ {
201+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204+
205+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207+ return 2;
208+ }
209+ } while (0);
210+
211+ do { /* PaX: unpatched PLT emulation */
212+ unsigned int br;
213+
214+ err = get_user(br, (unsigned int *)regs->pc);
215+
216+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217+ unsigned int br2, ldq, nop, jmp;
218+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219+
220+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221+ err = get_user(br2, (unsigned int *)addr);
222+ err |= get_user(ldq, (unsigned int *)(addr+4));
223+ err |= get_user(nop, (unsigned int *)(addr+8));
224+ err |= get_user(jmp, (unsigned int *)(addr+12));
225+ err |= get_user(resolver, (unsigned long *)(addr+16));
226+
227+ if (err)
228+ break;
229+
230+ if (br2 == 0xC3600000U &&
231+ ldq == 0xA77B000CU &&
232+ nop == 0x47FF041FU &&
233+ jmp == 0x6B7B0000U)
234+ {
235+ regs->r28 = regs->pc+4;
236+ regs->r27 = addr+16;
237+ regs->pc = resolver;
238+ return 3;
239+ }
240+ }
241+ } while (0);
242+#endif
243+
244+ return 1;
245+}
246+
247+void pax_report_insns(void *pc, void *sp)
248+{
249+ unsigned long i;
250+
251+ printk(KERN_ERR "PAX: bytes at PC: ");
252+ for (i = 0; i < 5; i++) {
253+ unsigned int c;
254+ if (get_user(c, (unsigned int *)pc+i))
255+ printk(KERN_CONT "???????? ");
256+ else
257+ printk(KERN_CONT "%08x ", c);
258+ }
259+ printk("\n");
260+}
261+#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269- if (!(vma->vm_flags & VM_EXEC))
270+ if (!(vma->vm_flags & VM_EXEC)) {
271+
272+#ifdef CONFIG_PAX_PAGEEXEC
273+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274+ goto bad_area;
275+
276+ up_read(&mm->mmap_sem);
277+ switch (pax_handle_fetch_fault(regs)) {
278+
279+#ifdef CONFIG_PAX_EMUPLT
280+ case 2:
281+ case 3:
282+ return;
283+#endif
284+
285+ }
286+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287+ do_group_exit(SIGKILL);
288+#else
289 goto bad_area;
290+#endif
291+
292+ }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
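
The PLT emulation above decodes ldah/lda/ldq/br/jmp encodings at the faulting PC and recomputes the target the real instructions would have produced. The recurring expression (x ^ 0x8000UL) + 0x8000UL, applied after forcing the upper 48 bits of the fetched word to ones, is a branch-free way to sign-extend the instruction's 16-bit displacement field: the add either restores a set sign bit or carries out and clears the forced upper bits. The same idiom in isolation (helper name and test values are illustrative; assumes a 64-bit unsigned long, as on Alpha):

#include <stdio.h>

/* Sketch: sign-extend the low 16-bit displacement of an instruction word
 * using the same OR-with-ones plus (x ^ 0x8000UL) + 0x8000UL idiom as the
 * PLT emulation above. */
static unsigned long sext16(unsigned int insn)
{
        unsigned long x = insn | 0xFFFFFFFFFFFF0000UL;

        return (x ^ 0x8000UL) + 0x8000UL;
}

int main(void)
{
        printf("%lx\n", sext16(0xA77B8010U));   /* displacement 0x8010 -> ffffffffffff8010 */
        printf("%lx\n", sext16(0xA77B7FF0U));   /* displacement 0x7ff0 -> 7ff0 */
        return 0;
}
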
296diff -urNp linux-2.6.32.42/arch/arm/include/asm/elf.h linux-2.6.32.42/arch/arm/include/asm/elf.h
297--- linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298+++ linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305+
306+#ifdef CONFIG_PAX_ASLR
307+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308+
309+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311+#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315diff -urNp linux-2.6.32.42/arch/arm/include/asm/kmap_types.h linux-2.6.32.42/arch/arm/include/asm/kmap_types.h
316--- linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317+++ linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318@@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322+ KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
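
KM_CLEARPAGE adds one more fixed atomic-kmap slot (the same one-line addition recurs in the other architectures' kmap_types.h below), so that a page-clearing path can map a highmem page without reusing one of the existing KM_USER*/KM_IRQ* slots. A sketch of how such a slot is typically used on a 2.6.32-era kernel, where kmap_atomic() still takes an explicit slot argument (the helper name is illustrative):

/* Sketch: clear a (possibly highmem) page through the dedicated
 * KM_CLEARPAGE slot added above. */
static void clear_page_via_clearpage_slot(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_CLEARPAGE);

        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_CLEARPAGE);
}
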
326diff -urNp linux-2.6.32.42/arch/arm/include/asm/uaccess.h linux-2.6.32.42/arch/arm/include/asm/uaccess.h
327--- linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328+++ linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329@@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333+extern void check_object_size(const void *ptr, unsigned long n, bool to);
334+
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338@@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346+
347+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348+{
349+ if (!__builtin_constant_p(n))
350+ check_object_size(to, n, false);
351+ return ___copy_from_user(to, from, n);
352+}
353+
354+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355+{
356+ if (!__builtin_constant_p(n))
357+ check_object_size(from, n, true);
358+ return ___copy_to_user(to, from, n);
359+}
360+
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368+ if ((long)n < 0)
369+ return n;
370+
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374@@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378+ if ((long)n < 0)
379+ return n;
380+
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
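
The renaming of __copy_{from,to}_user to ___copy_{from,to}_user exists so the new inline wrappers above can sit in front of the assembly implementations. Two checks are layered on: check_object_size() (invoked only for non-constant sizes) lets the hardened usercopy code validate that the kernel buffer really spans n bytes, and the (long)n < 0 test cheaply rejects sizes that came from an underflowed subtraction, since anything at or above 2^31 (or 2^63 on 64-bit) appears negative when viewed as signed. A self-contained illustration of the failure mode the signed check guards against (hypothetical buffer sizes, plain user-space C):

#include <stdio.h>

int main(void)
{
        unsigned long have = 16, want = 24;
        unsigned long n = have - want;          /* wraps to a huge unsigned value */

        if ((long)n < 0)
                printf("rejected oversized copy: n = %lu\n", n);   /* branch is taken */
        return 0;
}
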
384diff -urNp linux-2.6.32.42/arch/arm/kernel/armksyms.c linux-2.6.32.42/arch/arm/kernel/armksyms.c
385--- linux-2.6.32.42/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
386+++ linux-2.6.32.42/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
387@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
388 #ifdef CONFIG_MMU
389 EXPORT_SYMBOL(copy_page);
390
391-EXPORT_SYMBOL(__copy_from_user);
392-EXPORT_SYMBOL(__copy_to_user);
393+EXPORT_SYMBOL(___copy_from_user);
394+EXPORT_SYMBOL(___copy_to_user);
395 EXPORT_SYMBOL(__clear_user);
396
397 EXPORT_SYMBOL(__get_user_1);
398diff -urNp linux-2.6.32.42/arch/arm/kernel/kgdb.c linux-2.6.32.42/arch/arm/kernel/kgdb.c
399--- linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
400+++ linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
401@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
402 * and we handle the normal undef case within the do_undefinstr
403 * handler.
404 */
405-struct kgdb_arch arch_kgdb_ops = {
406+const struct kgdb_arch arch_kgdb_ops = {
407 #ifndef __ARMEB__
408 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
409 #else /* ! __ARMEB__ */
410diff -urNp linux-2.6.32.42/arch/arm/kernel/traps.c linux-2.6.32.42/arch/arm/kernel/traps.c
411--- linux-2.6.32.42/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
412+++ linux-2.6.32.42/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
413@@ -247,6 +247,8 @@ static void __die(const char *str, int e
414
415 DEFINE_SPINLOCK(die_lock);
416
417+extern void gr_handle_kernel_exploit(void);
418+
419 /*
420 * This function is protected against re-entrancy.
421 */
422@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
423 if (panic_on_oops)
424 panic("Fatal exception");
425
426+ gr_handle_kernel_exploit();
427+
428 do_exit(SIGSEGV);
429 }
430
431diff -urNp linux-2.6.32.42/arch/arm/lib/copy_from_user.S linux-2.6.32.42/arch/arm/lib/copy_from_user.S
432--- linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
434@@ -16,7 +16,7 @@
435 /*
436 * Prototype:
437 *
438- * size_t __copy_from_user(void *to, const void *from, size_t n)
439+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
440 *
441 * Purpose:
442 *
443@@ -84,11 +84,11 @@
444
445 .text
446
447-ENTRY(__copy_from_user)
448+ENTRY(___copy_from_user)
449
450 #include "copy_template.S"
451
452-ENDPROC(__copy_from_user)
453+ENDPROC(___copy_from_user)
454
455 .section .fixup,"ax"
456 .align 0
457diff -urNp linux-2.6.32.42/arch/arm/lib/copy_to_user.S linux-2.6.32.42/arch/arm/lib/copy_to_user.S
458--- linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
459+++ linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
460@@ -16,7 +16,7 @@
461 /*
462 * Prototype:
463 *
464- * size_t __copy_to_user(void *to, const void *from, size_t n)
465+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
466 *
467 * Purpose:
468 *
469@@ -88,11 +88,11 @@
470 .text
471
472 ENTRY(__copy_to_user_std)
473-WEAK(__copy_to_user)
474+WEAK(___copy_to_user)
475
476 #include "copy_template.S"
477
478-ENDPROC(__copy_to_user)
479+ENDPROC(___copy_to_user)
480
481 .section .fixup,"ax"
482 .align 0
483diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess.S linux-2.6.32.42/arch/arm/lib/uaccess.S
484--- linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
485+++ linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
486@@ -19,7 +19,7 @@
487
488 #define PAGE_SHIFT 12
489
490-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
491+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
492 * Purpose : copy a block to user memory from kernel memory
493 * Params : to - user memory
494 * : from - kernel memory
495@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
496 sub r2, r2, ip
497 b .Lc2u_dest_aligned
498
499-ENTRY(__copy_to_user)
500+ENTRY(___copy_to_user)
501 stmfd sp!, {r2, r4 - r7, lr}
502 cmp r2, #4
503 blt .Lc2u_not_enough
504@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
505 ldrgtb r3, [r1], #0
506 USER( strgtbt r3, [r0], #1) @ May fault
507 b .Lc2u_finished
508-ENDPROC(__copy_to_user)
509+ENDPROC(___copy_to_user)
510
511 .section .fixup,"ax"
512 .align 0
513 9001: ldmfd sp!, {r0, r4 - r7, pc}
514 .previous
515
516-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
517+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
518 * Purpose : copy a block from user memory to kernel memory
519 * Params : to - kernel memory
520 * : from - user memory
521@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
522 sub r2, r2, ip
523 b .Lcfu_dest_aligned
524
525-ENTRY(__copy_from_user)
526+ENTRY(___copy_from_user)
527 stmfd sp!, {r0, r2, r4 - r7, lr}
528 cmp r2, #4
529 blt .Lcfu_not_enough
530@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
531 USER( ldrgtbt r3, [r1], #1) @ May fault
532 strgtb r3, [r0], #1
533 b .Lcfu_finished
534-ENDPROC(__copy_from_user)
535+ENDPROC(___copy_from_user)
536
537 .section .fixup,"ax"
538 .align 0
539diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c
540--- linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
541+++ linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
542@@ -97,7 +97,7 @@ out:
543 }
544
545 unsigned long
546-__copy_to_user(void __user *to, const void *from, unsigned long n)
547+___copy_to_user(void __user *to, const void *from, unsigned long n)
548 {
549 /*
550 * This test is stubbed out of the main function above to keep
551diff -urNp linux-2.6.32.42/arch/arm/mach-at91/pm.c linux-2.6.32.42/arch/arm/mach-at91/pm.c
552--- linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
553+++ linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
554@@ -348,7 +348,7 @@ static void at91_pm_end(void)
555 }
556
557
558-static struct platform_suspend_ops at91_pm_ops ={
559+static const struct platform_suspend_ops at91_pm_ops ={
560 .valid = at91_pm_valid_state,
561 .begin = at91_pm_begin,
562 .enter = at91_pm_enter,
563diff -urNp linux-2.6.32.42/arch/arm/mach-omap1/pm.c linux-2.6.32.42/arch/arm/mach-omap1/pm.c
564--- linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
565+++ linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
566@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
567
568
569
570-static struct platform_suspend_ops omap_pm_ops ={
571+static const struct platform_suspend_ops omap_pm_ops ={
572 .prepare = omap_pm_prepare,
573 .enter = omap_pm_enter,
574 .finish = omap_pm_finish,
575diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c
576--- linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
577+++ linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
578@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
579 enable_hlt();
580 }
581
582-static struct platform_suspend_ops omap_pm_ops = {
583+static const struct platform_suspend_ops omap_pm_ops = {
584 .prepare = omap2_pm_prepare,
585 .enter = omap2_pm_enter,
586 .finish = omap2_pm_finish,
587diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c
588--- linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
589+++ linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
590@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
591 return;
592 }
593
594-static struct platform_suspend_ops omap_pm_ops = {
595+static const struct platform_suspend_ops omap_pm_ops = {
596 .begin = omap3_pm_begin,
597 .end = omap3_pm_end,
598 .prepare = omap3_pm_prepare,
599diff -urNp linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c
600--- linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
601+++ linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
602@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
603 (state == PM_SUSPEND_MEM);
604 }
605
606-static struct platform_suspend_ops pnx4008_pm_ops = {
607+static const struct platform_suspend_ops pnx4008_pm_ops = {
608 .enter = pnx4008_pm_enter,
609 .valid = pnx4008_pm_valid,
610 };
611diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/pm.c linux-2.6.32.42/arch/arm/mach-pxa/pm.c
612--- linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
613+++ linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
614@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
615 pxa_cpu_pm_fns->finish();
616 }
617
618-static struct platform_suspend_ops pxa_pm_ops = {
619+static const struct platform_suspend_ops pxa_pm_ops = {
620 .valid = pxa_pm_valid,
621 .enter = pxa_pm_enter,
622 .prepare = pxa_pm_prepare,
623diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c
624--- linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
625+++ linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
626@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
627 }
628
629 #ifdef CONFIG_PM
630-static struct platform_suspend_ops sharpsl_pm_ops = {
631+static const struct platform_suspend_ops sharpsl_pm_ops = {
632 .prepare = pxa_pm_prepare,
633 .finish = pxa_pm_finish,
634 .enter = corgi_pxa_pm_enter,
635diff -urNp linux-2.6.32.42/arch/arm/mach-sa1100/pm.c linux-2.6.32.42/arch/arm/mach-sa1100/pm.c
636--- linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
637+++ linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
638@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
639 return virt_to_phys(sp);
640 }
641
642-static struct platform_suspend_ops sa11x0_pm_ops = {
643+static const struct platform_suspend_ops sa11x0_pm_ops = {
644 .enter = sa11x0_pm_enter,
645 .valid = suspend_valid_only_mem,
646 };
647diff -urNp linux-2.6.32.42/arch/arm/mm/fault.c linux-2.6.32.42/arch/arm/mm/fault.c
648--- linux-2.6.32.42/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
649+++ linux-2.6.32.42/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
650@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
651 }
652 #endif
653
654+#ifdef CONFIG_PAX_PAGEEXEC
655+ if (fsr & FSR_LNX_PF) {
656+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
657+ do_group_exit(SIGKILL);
658+ }
659+#endif
660+
661 tsk->thread.address = addr;
662 tsk->thread.error_code = fsr;
663 tsk->thread.trap_no = 14;
664@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
665 }
666 #endif /* CONFIG_MMU */
667
668+#ifdef CONFIG_PAX_PAGEEXEC
669+void pax_report_insns(void *pc, void *sp)
670+{
671+ long i;
672+
673+ printk(KERN_ERR "PAX: bytes at PC: ");
674+ for (i = 0; i < 20; i++) {
675+ unsigned char c;
676+ if (get_user(c, (__force unsigned char __user *)pc+i))
677+ printk(KERN_CONT "?? ");
678+ else
679+ printk(KERN_CONT "%02x ", c);
680+ }
681+ printk("\n");
682+
683+ printk(KERN_ERR "PAX: bytes at SP-4: ");
684+ for (i = -1; i < 20; i++) {
685+ unsigned long c;
686+ if (get_user(c, (__force unsigned long __user *)sp+i))
687+ printk(KERN_CONT "???????? ");
688+ else
689+ printk(KERN_CONT "%08lx ", c);
690+ }
691+ printk("\n");
692+}
693+#endif
694+
695 /*
696 * First Level Translation Fault Handler
697 *
698diff -urNp linux-2.6.32.42/arch/arm/mm/mmap.c linux-2.6.32.42/arch/arm/mm/mmap.c
699--- linux-2.6.32.42/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.42/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
701@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
702 if (len > TASK_SIZE)
703 return -ENOMEM;
704
705+#ifdef CONFIG_PAX_RANDMMAP
706+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
707+#endif
708+
709 if (addr) {
710 if (do_align)
711 addr = COLOUR_ALIGN(addr, pgoff);
712@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
713 addr = PAGE_ALIGN(addr);
714
715 vma = find_vma(mm, addr);
716- if (TASK_SIZE - len >= addr &&
717- (!vma || addr + len <= vma->vm_start))
718+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
719 return addr;
720 }
721 if (len > mm->cached_hole_size) {
722- start_addr = addr = mm->free_area_cache;
723+ start_addr = addr = mm->free_area_cache;
724 } else {
725- start_addr = addr = TASK_UNMAPPED_BASE;
726- mm->cached_hole_size = 0;
727+ start_addr = addr = mm->mmap_base;
728+ mm->cached_hole_size = 0;
729 }
730
731 full_search:
732@@ -94,14 +97,14 @@ full_search:
733 * Start a new search - just in case we missed
734 * some holes.
735 */
736- if (start_addr != TASK_UNMAPPED_BASE) {
737- start_addr = addr = TASK_UNMAPPED_BASE;
738+ if (start_addr != mm->mmap_base) {
739+ start_addr = addr = mm->mmap_base;
740 mm->cached_hole_size = 0;
741 goto full_search;
742 }
743 return -ENOMEM;
744 }
745- if (!vma || addr + len <= vma->vm_start) {
746+ if (check_heap_stack_gap(vma, addr, len)) {
747 /*
748 * Remember the place where we stopped the search:
749 */
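
The RANDMMAP hunk here works by making the existing "if (addr)" statement the body of a new conditional: when MF_PAX_RANDMMAP is set, the hint-handling branch is skipped entirely and every search starts from mm->mmap_base, which the PaX code randomizes, instead of the caller's hint or the fixed TASK_UNMAPPED_BASE. Expanded, the resulting control flow amounts to the following sketch (alignment details omitted):

/* Sketch of the control flow produced in arch_get_unmapped_area(). */
if (!(mm->pax_flags & MF_PAX_RANDMMAP) && addr) {
        /* honour the caller's hint only when mmap randomization is off */
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
                return addr;
}
/* otherwise fall through to the linear search starting at mm->mmap_base */
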
750diff -urNp linux-2.6.32.42/arch/arm/plat-s3c/pm.c linux-2.6.32.42/arch/arm/plat-s3c/pm.c
751--- linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
752+++ linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
753@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
754 s3c_pm_check_cleanup();
755 }
756
757-static struct platform_suspend_ops s3c_pm_ops = {
758+static const struct platform_suspend_ops s3c_pm_ops = {
759 .enter = s3c_pm_enter,
760 .prepare = s3c_pm_prepare,
761 .finish = s3c_pm_finish,
762diff -urNp linux-2.6.32.42/arch/avr32/include/asm/elf.h linux-2.6.32.42/arch/avr32/include/asm/elf.h
763--- linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
764+++ linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
765@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
766 the loader. We need to make sure that it is out of the way of the program
767 that it will "exec", and that there is sufficient room for the brk. */
768
769-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
770+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
771
772+#ifdef CONFIG_PAX_ASLR
773+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
774+
775+#define PAX_DELTA_MMAP_LEN 15
776+#define PAX_DELTA_STACK_LEN 15
777+#endif
778
779 /* This yields a mask that user programs can use to figure out what
780 instruction set this CPU supports. This could be done in user space,
781diff -urNp linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h
782--- linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
783+++ linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
784@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
785 D(11) KM_IRQ1,
786 D(12) KM_SOFTIRQ0,
787 D(13) KM_SOFTIRQ1,
788-D(14) KM_TYPE_NR
789+D(14) KM_CLEARPAGE,
790+D(15) KM_TYPE_NR
791 };
792
793 #undef D
794diff -urNp linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c
795--- linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
796+++ linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
797@@ -176,7 +176,7 @@ out:
798 return 0;
799 }
800
801-static struct platform_suspend_ops avr32_pm_ops = {
802+static const struct platform_suspend_ops avr32_pm_ops = {
803 .valid = avr32_pm_valid_state,
804 .enter = avr32_pm_enter,
805 };
806diff -urNp linux-2.6.32.42/arch/avr32/mm/fault.c linux-2.6.32.42/arch/avr32/mm/fault.c
807--- linux-2.6.32.42/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
808+++ linux-2.6.32.42/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
809@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
810
811 int exception_trace = 1;
812
813+#ifdef CONFIG_PAX_PAGEEXEC
814+void pax_report_insns(void *pc, void *sp)
815+{
816+ unsigned long i;
817+
818+ printk(KERN_ERR "PAX: bytes at PC: ");
819+ for (i = 0; i < 20; i++) {
820+ unsigned char c;
821+ if (get_user(c, (unsigned char *)pc+i))
822+ printk(KERN_CONT "???????? ");
823+ else
824+ printk(KERN_CONT "%02x ", c);
825+ }
826+ printk("\n");
827+}
828+#endif
829+
830 /*
831 * This routine handles page faults. It determines the address and the
832 * problem, and then passes it off to one of the appropriate routines.
833@@ -157,6 +174,16 @@ bad_area:
834 up_read(&mm->mmap_sem);
835
836 if (user_mode(regs)) {
837+
838+#ifdef CONFIG_PAX_PAGEEXEC
839+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
840+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
841+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
842+ do_group_exit(SIGKILL);
843+ }
844+ }
845+#endif
846+
847 if (exception_trace && printk_ratelimit())
848 printk("%s%s[%d]: segfault at %08lx pc %08lx "
849 "sp %08lx ecr %lu\n",
850diff -urNp linux-2.6.32.42/arch/blackfin/kernel/kgdb.c linux-2.6.32.42/arch/blackfin/kernel/kgdb.c
851--- linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
852+++ linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
853@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
854 return -1; /* this means that we do not want to exit from the handler */
855 }
856
857-struct kgdb_arch arch_kgdb_ops = {
858+const struct kgdb_arch arch_kgdb_ops = {
859 .gdb_bpt_instr = {0xa1},
860 #ifdef CONFIG_SMP
861 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
862diff -urNp linux-2.6.32.42/arch/blackfin/mach-common/pm.c linux-2.6.32.42/arch/blackfin/mach-common/pm.c
863--- linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
864+++ linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
865@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
866 return 0;
867 }
868
869-struct platform_suspend_ops bfin_pm_ops = {
870+const struct platform_suspend_ops bfin_pm_ops = {
871 .enter = bfin_pm_enter,
872 .valid = bfin_pm_valid,
873 };
874diff -urNp linux-2.6.32.42/arch/frv/include/asm/kmap_types.h linux-2.6.32.42/arch/frv/include/asm/kmap_types.h
875--- linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
876+++ linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
877@@ -23,6 +23,7 @@ enum km_type {
878 KM_IRQ1,
879 KM_SOFTIRQ0,
880 KM_SOFTIRQ1,
881+ KM_CLEARPAGE,
882 KM_TYPE_NR
883 };
884
885diff -urNp linux-2.6.32.42/arch/frv/mm/elf-fdpic.c linux-2.6.32.42/arch/frv/mm/elf-fdpic.c
886--- linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
888@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
889 if (addr) {
890 addr = PAGE_ALIGN(addr);
891 vma = find_vma(current->mm, addr);
892- if (TASK_SIZE - len >= addr &&
893- (!vma || addr + len <= vma->vm_start))
894+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
895 goto success;
896 }
897
898@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
899 for (; vma; vma = vma->vm_next) {
900 if (addr > limit)
901 break;
902- if (addr + len <= vma->vm_start)
903+ if (check_heap_stack_gap(vma, addr, len))
904 goto success;
905 addr = vma->vm_end;
906 }
907@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
908 for (; vma; vma = vma->vm_next) {
909 if (addr > limit)
910 break;
911- if (addr + len <= vma->vm_start)
912+ if (check_heap_stack_gap(vma, addr, len))
913 goto success;
914 addr = vma->vm_end;
915 }
916diff -urNp linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c
917--- linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
918+++ linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
919@@ -17,7 +17,7 @@
920 #include <linux/swiotlb.h>
921 #include <asm/machvec.h>
922
923-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
924+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
925
926 /* swiotlb declarations & definitions: */
927 extern int swiotlb_late_init_with_default_size (size_t size);
928@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
929 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
930 }
931
932-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
933+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
934 {
935 if (use_swiotlb(dev))
936 return &swiotlb_dma_ops;
937diff -urNp linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c
938--- linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
939+++ linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
940@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
941 },
942 };
943
944-extern struct dma_map_ops swiotlb_dma_ops;
945+extern const struct dma_map_ops swiotlb_dma_ops;
946
947 static int __init
948 sba_init(void)
949@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
950
951 __setup("sbapagesize=",sba_page_override);
952
953-struct dma_map_ops sba_dma_ops = {
954+const struct dma_map_ops sba_dma_ops = {
955 .alloc_coherent = sba_alloc_coherent,
956 .free_coherent = sba_free_coherent,
957 .map_page = sba_map_page,
958diff -urNp linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c
959--- linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
960+++ linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
961@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
962
963 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
964
965+#ifdef CONFIG_PAX_ASLR
966+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
967+
968+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
969+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
970+#endif
971+
972 /* Ugly but avoids duplication */
973 #include "../../../fs/binfmt_elf.c"
974
975diff -urNp linux-2.6.32.42/arch/ia64/ia32/ia32priv.h linux-2.6.32.42/arch/ia64/ia32/ia32priv.h
976--- linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
977+++ linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
978@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
979 #define ELF_DATA ELFDATA2LSB
980 #define ELF_ARCH EM_386
981
982-#define IA32_STACK_TOP IA32_PAGE_OFFSET
983+#ifdef CONFIG_PAX_RANDUSTACK
984+#define __IA32_DELTA_STACK (current->mm->delta_stack)
985+#else
986+#define __IA32_DELTA_STACK 0UL
987+#endif
988+
989+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
990+
991 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
992 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
993
994diff -urNp linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h
995--- linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
996+++ linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
997@@ -12,7 +12,7 @@
998
999 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1000
1001-extern struct dma_map_ops *dma_ops;
1002+extern const struct dma_map_ops *dma_ops;
1003 extern struct ia64_machine_vector ia64_mv;
1004 extern void set_iommu_machvec(void);
1005
1006@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
1007 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1008 dma_addr_t *daddr, gfp_t gfp)
1009 {
1010- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1011+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1012 void *caddr;
1013
1014 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1015@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1016 static inline void dma_free_coherent(struct device *dev, size_t size,
1017 void *caddr, dma_addr_t daddr)
1018 {
1019- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1020+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1021 debug_dma_free_coherent(dev, size, caddr, daddr);
1022 ops->free_coherent(dev, size, caddr, daddr);
1023 }
1024@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1025
1026 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1027 {
1028- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1029+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1030 return ops->mapping_error(dev, daddr);
1031 }
1032
1033 static inline int dma_supported(struct device *dev, u64 mask)
1034 {
1035- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1036+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1037 return ops->dma_supported(dev, mask);
1038 }
1039
1040diff -urNp linux-2.6.32.42/arch/ia64/include/asm/elf.h linux-2.6.32.42/arch/ia64/include/asm/elf.h
1041--- linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1042+++ linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1043@@ -43,6 +43,13 @@
1044 */
1045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1046
1047+#ifdef CONFIG_PAX_ASLR
1048+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1049+
1050+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1051+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1052+#endif
1053+
1054 #define PT_IA_64_UNWIND 0x70000001
1055
1056 /* IA-64 relocations: */
1057diff -urNp linux-2.6.32.42/arch/ia64/include/asm/machvec.h linux-2.6.32.42/arch/ia64/include/asm/machvec.h
1058--- linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1059+++ linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1060@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1061 /* DMA-mapping interface: */
1062 typedef void ia64_mv_dma_init (void);
1063 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1064-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1065+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1066
1067 /*
1068 * WARNING: The legacy I/O space is _architected_. Platforms are
1069@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1070 # endif /* CONFIG_IA64_GENERIC */
1071
1072 extern void swiotlb_dma_init(void);
1073-extern struct dma_map_ops *dma_get_ops(struct device *);
1074+extern const struct dma_map_ops *dma_get_ops(struct device *);
1075
1076 /*
1077 * Define default versions so we can extend machvec for new platforms without having
1078diff -urNp linux-2.6.32.42/arch/ia64/include/asm/pgtable.h linux-2.6.32.42/arch/ia64/include/asm/pgtable.h
1079--- linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1080+++ linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1081@@ -12,7 +12,7 @@
1082 * David Mosberger-Tang <davidm@hpl.hp.com>
1083 */
1084
1085-
1086+#include <linux/const.h>
1087 #include <asm/mman.h>
1088 #include <asm/page.h>
1089 #include <asm/processor.h>
1090@@ -143,6 +143,17 @@
1091 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1092 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1093 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1094+
1095+#ifdef CONFIG_PAX_PAGEEXEC
1096+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1097+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1098+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1099+#else
1100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1101+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1102+# define PAGE_COPY_NOEXEC PAGE_COPY
1103+#endif
1104+
1105 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1106 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1107 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1108diff -urNp linux-2.6.32.42/arch/ia64/include/asm/spinlock.h linux-2.6.32.42/arch/ia64/include/asm/spinlock.h
1109--- linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1110+++ linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1111@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1112 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1113
1114 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1115- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1116+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1117 }
1118
1119 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1120diff -urNp linux-2.6.32.42/arch/ia64/include/asm/uaccess.h linux-2.6.32.42/arch/ia64/include/asm/uaccess.h
1121--- linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1122+++ linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1123@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1124 const void *__cu_from = (from); \
1125 long __cu_len = (n); \
1126 \
1127- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1128+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1129 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1130 __cu_len; \
1131 })
1132@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1133 long __cu_len = (n); \
1134 \
1135 __chk_user_ptr(__cu_from); \
1136- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1137+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1138 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1139 __cu_len; \
1140 })
1141diff -urNp linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c
1142--- linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1143+++ linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1144@@ -3,7 +3,7 @@
1145 /* Set this to 1 if there is a HW IOMMU in the system */
1146 int iommu_detected __read_mostly;
1147
1148-struct dma_map_ops *dma_ops;
1149+const struct dma_map_ops *dma_ops;
1150 EXPORT_SYMBOL(dma_ops);
1151
1152 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1153@@ -16,7 +16,7 @@ static int __init dma_init(void)
1154 }
1155 fs_initcall(dma_init);
1156
1157-struct dma_map_ops *dma_get_ops(struct device *dev)
1158+const struct dma_map_ops *dma_get_ops(struct device *dev)
1159 {
1160 return dma_ops;
1161 }
1162diff -urNp linux-2.6.32.42/arch/ia64/kernel/module.c linux-2.6.32.42/arch/ia64/kernel/module.c
1163--- linux-2.6.32.42/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1164+++ linux-2.6.32.42/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1165@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1166 void
1167 module_free (struct module *mod, void *module_region)
1168 {
1169- if (mod && mod->arch.init_unw_table &&
1170- module_region == mod->module_init) {
1171+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1172 unw_remove_unwind_table(mod->arch.init_unw_table);
1173 mod->arch.init_unw_table = NULL;
1174 }
1175@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1176 }
1177
1178 static inline int
1179+in_init_rx (const struct module *mod, uint64_t addr)
1180+{
1181+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1182+}
1183+
1184+static inline int
1185+in_init_rw (const struct module *mod, uint64_t addr)
1186+{
1187+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1188+}
1189+
1190+static inline int
1191 in_init (const struct module *mod, uint64_t addr)
1192 {
1193- return addr - (uint64_t) mod->module_init < mod->init_size;
1194+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1195+}
1196+
1197+static inline int
1198+in_core_rx (const struct module *mod, uint64_t addr)
1199+{
1200+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1201+}
1202+
1203+static inline int
1204+in_core_rw (const struct module *mod, uint64_t addr)
1205+{
1206+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1207 }
1208
1209 static inline int
1210 in_core (const struct module *mod, uint64_t addr)
1211 {
1212- return addr - (uint64_t) mod->module_core < mod->core_size;
1213+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1214 }
1215
1216 static inline int
1217@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1218 break;
1219
1220 case RV_BDREL:
1221- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1222+ if (in_init_rx(mod, val))
1223+ val -= (uint64_t) mod->module_init_rx;
1224+ else if (in_init_rw(mod, val))
1225+ val -= (uint64_t) mod->module_init_rw;
1226+ else if (in_core_rx(mod, val))
1227+ val -= (uint64_t) mod->module_core_rx;
1228+ else if (in_core_rw(mod, val))
1229+ val -= (uint64_t) mod->module_core_rw;
1230 break;
1231
1232 case RV_LTV:
1233@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1234 * addresses have been selected...
1235 */
1236 uint64_t gp;
1237- if (mod->core_size > MAX_LTOFF)
1238+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1239 /*
1240 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1241 * at the end of the module.
1242 */
1243- gp = mod->core_size - MAX_LTOFF / 2;
1244+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1245 else
1246- gp = mod->core_size / 2;
1247- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1248+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1249+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1250 mod->arch.gp = gp;
1251 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1252 }
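
For the module loader, the single core/init allocations become separate rx (code and rodata) and rw (data) regions, so every address classification and the gp placement must account for both halves. The in_init_rx()/in_core_rw() helpers above rely on the usual unsigned-wraparound idiom: "addr - base < size" covers both "addr >= base" and "addr < base + size" in a single comparison. The idiom in isolation (illustrative names):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: one-comparison range check as used by in_init_rx() and friends;
 * when addr < base the subtraction wraps to a huge value and the
 * comparison fails. */
static bool in_region(uint64_t addr, uint64_t base, uint64_t size)
{
        return addr - base < size;
}
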
1253diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-dma.c linux-2.6.32.42/arch/ia64/kernel/pci-dma.c
1254--- linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1255+++ linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1256@@ -43,7 +43,7 @@ struct device fallback_dev = {
1257 .dma_mask = &fallback_dev.coherent_dma_mask,
1258 };
1259
1260-extern struct dma_map_ops intel_dma_ops;
1261+extern const struct dma_map_ops intel_dma_ops;
1262
1263 static int __init pci_iommu_init(void)
1264 {
1265@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1266 }
1267 EXPORT_SYMBOL(iommu_dma_supported);
1268
1269+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1270+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1271+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1272+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1273+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1274+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1275+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1276+
1277+static const struct dma_map_ops intel_iommu_dma_ops = {
1278+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1279+ .alloc_coherent = intel_alloc_coherent,
1280+ .free_coherent = intel_free_coherent,
1281+ .map_sg = intel_map_sg,
1282+ .unmap_sg = intel_unmap_sg,
1283+ .map_page = intel_map_page,
1284+ .unmap_page = intel_unmap_page,
1285+ .mapping_error = intel_mapping_error,
1286+
1287+ .sync_single_for_cpu = machvec_dma_sync_single,
1288+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1289+ .sync_single_for_device = machvec_dma_sync_single,
1290+ .sync_sg_for_device = machvec_dma_sync_sg,
1291+ .dma_supported = iommu_dma_supported,
1292+};
1293+
1294 void __init pci_iommu_alloc(void)
1295 {
1296- dma_ops = &intel_dma_ops;
1297-
1298- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1299- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1300- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1301- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1302- dma_ops->dma_supported = iommu_dma_supported;
1303+ dma_ops = &intel_iommu_dma_ops;
1304
1305 /*
1306 * The order of these functions is important for
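
Once dma_map_ops instances are const they can be placed in read-only memory, so pci_iommu_alloc() can no longer patch intel_dma_ops in place at init time; the hunk instead declares a second, fully initialized const table (intel_iommu_dma_ops) with the machvec sync hooks already filled in and simply points dma_ops at it. The general pattern, reduced to a sketch with illustrative types and names:

#include <stdio.h>

/* Sketch: replace runtime patching of a writable ops table with a second,
 * fully initialized const table selected by pointer assignment. */
struct ops {
        int (*setup)(void);
};

static int generic_setup(void) { return 0; }

static const struct ops hardened_ops = {
        .setup = generic_setup,         /* bound at build time, not at init */
};

static const struct ops *active_ops = &hardened_ops;

int main(void)
{
        return active_ops->setup();
}
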
1307diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c
1308--- linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1309+++ linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1310@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1311 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1312 }
1313
1314-struct dma_map_ops swiotlb_dma_ops = {
1315+const struct dma_map_ops swiotlb_dma_ops = {
1316 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1317 .free_coherent = swiotlb_free_coherent,
1318 .map_page = swiotlb_map_page,
1319diff -urNp linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c
1320--- linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1321+++ linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1322@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1323 if (REGION_NUMBER(addr) == RGN_HPAGE)
1324 addr = 0;
1325 #endif
1326+
1327+#ifdef CONFIG_PAX_RANDMMAP
1328+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1329+ addr = mm->free_area_cache;
1330+ else
1331+#endif
1332+
1333 if (!addr)
1334 addr = mm->free_area_cache;
1335
1336@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1337 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1338 /* At this point: (!vma || addr < vma->vm_end). */
1339 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1340- if (start_addr != TASK_UNMAPPED_BASE) {
1341+ if (start_addr != mm->mmap_base) {
1342 /* Start a new search --- just in case we missed some holes. */
1343- addr = TASK_UNMAPPED_BASE;
1344+ addr = mm->mmap_base;
1345 goto full_search;
1346 }
1347 return -ENOMEM;
1348 }
1349- if (!vma || addr + len <= vma->vm_start) {
1350+ if (check_heap_stack_gap(vma, addr, len)) {
1351 /* Remember the address where we stopped this search: */
1352 mm->free_area_cache = addr + len;
1353 return addr;
1354diff -urNp linux-2.6.32.42/arch/ia64/kernel/topology.c linux-2.6.32.42/arch/ia64/kernel/topology.c
1355--- linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1356+++ linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1357@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1358 return ret;
1359 }
1360
1361-static struct sysfs_ops cache_sysfs_ops = {
1362+static const struct sysfs_ops cache_sysfs_ops = {
1363 .show = cache_show
1364 };
1365
1366diff -urNp linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S
1367--- linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1368+++ linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1369@@ -190,7 +190,7 @@ SECTIONS
1370 /* Per-cpu data: */
1371 . = ALIGN(PERCPU_PAGE_SIZE);
1372 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1373- __phys_per_cpu_start = __per_cpu_load;
1374+ __phys_per_cpu_start = per_cpu_load;
1375 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1376 * into percpu page size
1377 */
1378diff -urNp linux-2.6.32.42/arch/ia64/mm/fault.c linux-2.6.32.42/arch/ia64/mm/fault.c
1379--- linux-2.6.32.42/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.42/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1382 return pte_present(pte);
1383 }
1384
1385+#ifdef CONFIG_PAX_PAGEEXEC
1386+void pax_report_insns(void *pc, void *sp)
1387+{
1388+ unsigned long i;
1389+
1390+ printk(KERN_ERR "PAX: bytes at PC: ");
1391+ for (i = 0; i < 8; i++) {
1392+ unsigned int c;
1393+ if (get_user(c, (unsigned int *)pc+i))
1394+ printk(KERN_CONT "???????? ");
1395+ else
1396+ printk(KERN_CONT "%08x ", c);
1397+ }
1398+ printk("\n");
1399+}
1400+#endif
1401+
1402 void __kprobes
1403 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1404 {
1405@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1406 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1407 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1408
1409- if ((vma->vm_flags & mask) != mask)
1410+ if ((vma->vm_flags & mask) != mask) {
1411+
1412+#ifdef CONFIG_PAX_PAGEEXEC
1413+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1414+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1415+ goto bad_area;
1416+
1417+ up_read(&mm->mmap_sem);
1418+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1419+ do_group_exit(SIGKILL);
1420+ }
1421+#endif
1422+
1423 goto bad_area;
1424
1425+ }
1426+
1427 survive:
1428 /*
1429 * If for any reason at all we couldn't handle the fault, make
1430diff -urNp linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c
1431--- linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1432+++ linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1433@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1434 /* At this point: (!vmm || addr < vmm->vm_end). */
1435 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1436 return -ENOMEM;
1437- if (!vmm || (addr + len) <= vmm->vm_start)
1438+ if (check_heap_stack_gap(vmm, addr, len))
1439 return addr;
1440 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1441 }
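
The hugetlb hunk above is one of many sites where the open-coded test "!vmm || addr + len <= vmm->vm_start" is replaced by check_heap_stack_gap(). The helper itself is defined elsewhere in the patch; the user-space model below is only an assumed reading of its purpose, namely that besides rejecting direct overlap it keeps a configurable guard gap below a downward-growing stack VMA. All names and the 64KB gap value are stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x00000100UL
static unsigned long heap_stack_gap = 64UL << 10;   /* 64KB guard, illustrative */

struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_flags;
};

static bool gap_ok(const struct vma_stub *vma, unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;
        if (vma->vm_flags & VM_GROWSDOWN)
                return addr + len + heap_stack_gap <= vma->vm_start;
        return addr + len <= vma->vm_start;
}

int main(void)
{
        struct vma_stub stack = { .vm_start = 0xbf000000UL, .vm_flags = VM_GROWSDOWN };

        /* a mapping ending right below the stack is rejected ... */
        printf("%d\n", gap_ok(&stack, stack.vm_start - 4096, 4096));
        /* ... but one that leaves the guard gap is accepted */
        printf("%d\n", gap_ok(&stack, stack.vm_start - (1UL << 20), 4096));
        return 0;
}
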
1442diff -urNp linux-2.6.32.42/arch/ia64/mm/init.c linux-2.6.32.42/arch/ia64/mm/init.c
1443--- linux-2.6.32.42/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1444+++ linux-2.6.32.42/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1445@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1446 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1447 vma->vm_end = vma->vm_start + PAGE_SIZE;
1448 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1449+
1450+#ifdef CONFIG_PAX_PAGEEXEC
1451+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1452+ vma->vm_flags &= ~VM_EXEC;
1453+
1454+#ifdef CONFIG_PAX_MPROTECT
1455+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1456+ vma->vm_flags &= ~VM_MAYEXEC;
1457+#endif
1458+
1459+ }
1460+#endif
1461+
1462 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1463 down_write(&current->mm->mmap_sem);
1464 if (insert_vm_struct(current->mm, vma)) {
1465diff -urNp linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c
1466--- linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1467+++ linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1468@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1469 return ret;
1470 }
1471
1472-static struct dma_map_ops sn_dma_ops = {
1473+static const struct dma_map_ops sn_dma_ops = {
1474 .alloc_coherent = sn_dma_alloc_coherent,
1475 .free_coherent = sn_dma_free_coherent,
1476 .map_page = sn_dma_map_page,
1477diff -urNp linux-2.6.32.42/arch/m32r/lib/usercopy.c linux-2.6.32.42/arch/m32r/lib/usercopy.c
1478--- linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1479+++ linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1480@@ -14,6 +14,9 @@
1481 unsigned long
1482 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1483 {
1484+ if ((long)n < 0)
1485+ return n;
1486+
1487 prefetch(from);
1488 if (access_ok(VERIFY_WRITE, to, n))
1489 __copy_user(to,from,n);
1490@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1491 unsigned long
1492 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1493 {
1494+ if ((long)n < 0)
1495+ return n;
1496+
1497 prefetchw(to);
1498 if (access_ok(VERIFY_READ, from, n))
1499 __copy_user_zeroing(to,from,n);
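
The two m32r hunks above add the same "(long)n < 0" guard that this patch sprinkles across the usercopy helpers of several architectures. A length produced by signed arithmetic that underflows turns into an enormous unsigned count; rejecting any value whose top bit is set stops such a copy before access_ok() is even consulted. A small user-space illustration with made-up values:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
        if ((long)n < 0)        /* same check the patch adds */
                return n;       /* refuse: report everything as "not copied" */
        /* ... would copy n bytes here ... */
        return 0;
}

int main(void)
{
        int have = 16, want = 32;
        unsigned long n = (unsigned long)(have - want);   /* -16 becomes a huge count */

        printf("n = %lu, leftover = %lu\n", n, guarded_copy(n));
        return 0;
}
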
1500diff -urNp linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c
1501--- linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1503@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1504
1505 }
1506
1507-static struct platform_suspend_ops db1x_pm_ops = {
1508+static const struct platform_suspend_ops db1x_pm_ops = {
1509 .valid = suspend_valid_only_mem,
1510 .begin = db1x_pm_begin,
1511 .enter = db1x_pm_enter,
1512diff -urNp linux-2.6.32.42/arch/mips/include/asm/elf.h linux-2.6.32.42/arch/mips/include/asm/elf.h
1513--- linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1514+++ linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1515@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1516 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1517 #endif
1518
1519+#ifdef CONFIG_PAX_ASLR
1520+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1521+
1522+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1524+#endif
1525+
1526 #endif /* _ASM_ELF_H */
1527diff -urNp linux-2.6.32.42/arch/mips/include/asm/page.h linux-2.6.32.42/arch/mips/include/asm/page.h
1528--- linux-2.6.32.42/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1529+++ linux-2.6.32.42/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1530@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1531 #ifdef CONFIG_CPU_MIPS32
1532 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1533 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1534- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1535+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1536 #else
1537 typedef struct { unsigned long long pte; } pte_t;
1538 #define pte_val(x) ((x).pte)
1539diff -urNp linux-2.6.32.42/arch/mips/include/asm/system.h linux-2.6.32.42/arch/mips/include/asm/system.h
1540--- linux-2.6.32.42/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1541+++ linux-2.6.32.42/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1542@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1543 */
1544 #define __ARCH_WANT_UNLOCKED_CTXSW
1545
1546-extern unsigned long arch_align_stack(unsigned long sp);
1547+#define arch_align_stack(x) ((x) & ~0xfUL)
1548
1549 #endif /* _ASM_SYSTEM_H */
1550diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c
1551--- linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1552+++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1553@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1554 #undef ELF_ET_DYN_BASE
1555 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1556
1557+#ifdef CONFIG_PAX_ASLR
1558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1559+
1560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1562+#endif
1563+
1564 #include <asm/processor.h>
1565 #include <linux/module.h>
1566 #include <linux/elfcore.h>
1567diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c
1568--- linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1569+++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1570@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1571 #undef ELF_ET_DYN_BASE
1572 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1573
1574+#ifdef CONFIG_PAX_ASLR
1575+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1576+
1577+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1578+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1579+#endif
1580+
1581 #include <asm/processor.h>
1582
1583 /*
1584diff -urNp linux-2.6.32.42/arch/mips/kernel/kgdb.c linux-2.6.32.42/arch/mips/kernel/kgdb.c
1585--- linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1586+++ linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1587@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1588 return -1;
1589 }
1590
1591+/* cannot be const */
1592 struct kgdb_arch arch_kgdb_ops;
1593
1594 /*
1595diff -urNp linux-2.6.32.42/arch/mips/kernel/process.c linux-2.6.32.42/arch/mips/kernel/process.c
1596--- linux-2.6.32.42/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1597+++ linux-2.6.32.42/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1598@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1599 out:
1600 return pc;
1601 }
1602-
1603-/*
1604- * Don't forget that the stack pointer must be aligned on a 8 bytes
1605- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1606- */
1607-unsigned long arch_align_stack(unsigned long sp)
1608-{
1609- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1610- sp -= get_random_int() & ~PAGE_MASK;
1611-
1612- return sp & ALMASK;
1613-}
1614diff -urNp linux-2.6.32.42/arch/mips/kernel/syscall.c linux-2.6.32.42/arch/mips/kernel/syscall.c
1615--- linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1616+++ linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1617@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1618 do_color_align = 0;
1619 if (filp || (flags & MAP_SHARED))
1620 do_color_align = 1;
1621+
1622+#ifdef CONFIG_PAX_RANDMMAP
1623+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1624+#endif
1625+
1626 if (addr) {
1627 if (do_color_align)
1628 addr = COLOUR_ALIGN(addr, pgoff);
1629 else
1630 addr = PAGE_ALIGN(addr);
1631 vmm = find_vma(current->mm, addr);
1632- if (task_size - len >= addr &&
1633- (!vmm || addr + len <= vmm->vm_start))
1634+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1635 return addr;
1636 }
1637- addr = TASK_UNMAPPED_BASE;
1638+ addr = current->mm->mmap_base;
1639 if (do_color_align)
1640 addr = COLOUR_ALIGN(addr, pgoff);
1641 else
1642@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1643 /* At this point: (!vmm || addr < vmm->vm_end). */
1644 if (task_size - len < addr)
1645 return -ENOMEM;
1646- if (!vmm || addr + len <= vmm->vm_start)
1647+ if (check_heap_stack_gap(vmm, addr, len))
1648 return addr;
1649 addr = vmm->vm_end;
1650 if (do_color_align)
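
Two things change in the MIPS arch_get_unmapped_area() above: under MF_PAX_RANDMMAP the caller-supplied hint is ignored (the #ifdef makes the whole "if (addr)" block conditional on RANDMMAP being off), and the fallback search starts at the per-mm, potentially randomized mmap_base instead of the fixed TASK_UNMAPPED_BASE. A control-flow sketch of just that decision, with stand-in names and addresses:

#include <stdbool.h>
#include <stdio.h>

static unsigned long pick_start(bool randmmap, unsigned long hint,
                                unsigned long mmap_base)
{
        if (!randmmap && hint)          /* honour the hint only without RANDMMAP */
                return hint;
        return mmap_base;               /* otherwise start at the randomized base */
}

int main(void)
{
        printf("%#lx\n", pick_start(false, 0x40000000UL, 0x77000000UL));
        printf("%#lx\n", pick_start(true,  0x40000000UL, 0x77000000UL));
        return 0;
}
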
1651diff -urNp linux-2.6.32.42/arch/mips/mm/fault.c linux-2.6.32.42/arch/mips/mm/fault.c
1652--- linux-2.6.32.42/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1653+++ linux-2.6.32.42/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1654@@ -26,6 +26,23 @@
1655 #include <asm/ptrace.h>
1656 #include <asm/highmem.h> /* For VMALLOC_END */
1657
1658+#ifdef CONFIG_PAX_PAGEEXEC
1659+void pax_report_insns(void *pc, void *sp)
1660+{
1661+ unsigned long i;
1662+
1663+ printk(KERN_ERR "PAX: bytes at PC: ");
1664+ for (i = 0; i < 5; i++) {
1665+ unsigned int c;
1666+ if (get_user(c, (unsigned int *)pc+i))
1667+ printk(KERN_CONT "???????? ");
1668+ else
1669+ printk(KERN_CONT "%08x ", c);
1670+ }
1671+ printk("\n");
1672+}
1673+#endif
1674+
1675 /*
1676 * This routine handles page faults. It determines the address,
1677 * and the problem, and then passes it off to one of the appropriate
1678diff -urNp linux-2.6.32.42/arch/parisc/include/asm/elf.h linux-2.6.32.42/arch/parisc/include/asm/elf.h
1679--- linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1680+++ linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1681@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1682
1683 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1684
1685+#ifdef CONFIG_PAX_ASLR
1686+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1687+
1688+#define PAX_DELTA_MMAP_LEN 16
1689+#define PAX_DELTA_STACK_LEN 16
1690+#endif
1691+
1692 /* This yields a mask that user programs can use to figure out what
1693 instruction set this CPU supports. This could be done in user space,
1694 but it's not easy, and we've already done it here. */
1695diff -urNp linux-2.6.32.42/arch/parisc/include/asm/pgtable.h linux-2.6.32.42/arch/parisc/include/asm/pgtable.h
1696--- linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1697+++ linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1698@@ -207,6 +207,17 @@
1699 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1700 #define PAGE_COPY PAGE_EXECREAD
1701 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1702+
1703+#ifdef CONFIG_PAX_PAGEEXEC
1704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1707+#else
1708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1709+# define PAGE_COPY_NOEXEC PAGE_COPY
1710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1711+#endif
1712+
1713 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1714 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1715 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1716diff -urNp linux-2.6.32.42/arch/parisc/kernel/module.c linux-2.6.32.42/arch/parisc/kernel/module.c
1717--- linux-2.6.32.42/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1718+++ linux-2.6.32.42/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1719@@ -95,16 +95,38 @@
1720
1721 /* three functions to determine where in the module core
1722 * or init pieces the location is */
1723+static inline int in_init_rx(struct module *me, void *loc)
1724+{
1725+ return (loc >= me->module_init_rx &&
1726+ loc < (me->module_init_rx + me->init_size_rx));
1727+}
1728+
1729+static inline int in_init_rw(struct module *me, void *loc)
1730+{
1731+ return (loc >= me->module_init_rw &&
1732+ loc < (me->module_init_rw + me->init_size_rw));
1733+}
1734+
1735 static inline int in_init(struct module *me, void *loc)
1736 {
1737- return (loc >= me->module_init &&
1738- loc <= (me->module_init + me->init_size));
1739+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1740+}
1741+
1742+static inline int in_core_rx(struct module *me, void *loc)
1743+{
1744+ return (loc >= me->module_core_rx &&
1745+ loc < (me->module_core_rx + me->core_size_rx));
1746+}
1747+
1748+static inline int in_core_rw(struct module *me, void *loc)
1749+{
1750+ return (loc >= me->module_core_rw &&
1751+ loc < (me->module_core_rw + me->core_size_rw));
1752 }
1753
1754 static inline int in_core(struct module *me, void *loc)
1755 {
1756- return (loc >= me->module_core &&
1757- loc <= (me->module_core + me->core_size));
1758+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1759 }
1760
1761 static inline int in_local(struct module *me, void *loc)
1762@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1763 }
1764
1765 /* align things a bit */
1766- me->core_size = ALIGN(me->core_size, 16);
1767- me->arch.got_offset = me->core_size;
1768- me->core_size += gots * sizeof(struct got_entry);
1769-
1770- me->core_size = ALIGN(me->core_size, 16);
1771- me->arch.fdesc_offset = me->core_size;
1772- me->core_size += fdescs * sizeof(Elf_Fdesc);
1773+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1774+ me->arch.got_offset = me->core_size_rw;
1775+ me->core_size_rw += gots * sizeof(struct got_entry);
1776+
1777+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1778+ me->arch.fdesc_offset = me->core_size_rw;
1779+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1780
1781 me->arch.got_max = gots;
1782 me->arch.fdesc_max = fdescs;
1783@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1784
1785 BUG_ON(value == 0);
1786
1787- got = me->module_core + me->arch.got_offset;
1788+ got = me->module_core_rw + me->arch.got_offset;
1789 for (i = 0; got[i].addr; i++)
1790 if (got[i].addr == value)
1791 goto out;
1792@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1793 #ifdef CONFIG_64BIT
1794 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1795 {
1796- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1797+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1798
1799 if (!value) {
1800 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1801@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1802
1803 /* Create new one */
1804 fdesc->addr = value;
1805- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1806+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1807 return (Elf_Addr)fdesc;
1808 }
1809 #endif /* CONFIG_64BIT */
1810@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1811
1812 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1813 end = table + sechdrs[me->arch.unwind_section].sh_size;
1814- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1815+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1816
1817 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1818 me->arch.unwind_section, table, end, gp);
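
Beyond splitting the parisc module image into RX and RW halves, note that the new in_init_rx()/in_core_rx() style helpers also tighten the interval test from the old inclusive "<=" to a half-open "<", so an address exactly one byte past the end of a region no longer counts as inside it. Minimal demonstration with arbitrary values:

#include <stdbool.h>
#include <stdio.h>

static bool in_region(unsigned long loc, unsigned long base, unsigned long size)
{
        return loc >= base && loc < base + size;    /* half-open [base, base+size) */
}

int main(void)
{
        unsigned long base = 0x1000, size = 0x100;

        printf("%d\n", in_region(base + size - 1, base, size));  /* 1: last byte */
        printf("%d\n", in_region(base + size,     base, size));  /* 0: one past end */
        return 0;
}
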
1819diff -urNp linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c
1820--- linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1821+++ linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1822@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1823 /* At this point: (!vma || addr < vma->vm_end). */
1824 if (TASK_SIZE - len < addr)
1825 return -ENOMEM;
1826- if (!vma || addr + len <= vma->vm_start)
1827+ if (check_heap_stack_gap(vma, addr, len))
1828 return addr;
1829 addr = vma->vm_end;
1830 }
1831@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1832 /* At this point: (!vma || addr < vma->vm_end). */
1833 if (TASK_SIZE - len < addr)
1834 return -ENOMEM;
1835- if (!vma || addr + len <= vma->vm_start)
1836+ if (check_heap_stack_gap(vma, addr, len))
1837 return addr;
1838 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1839 if (addr < vma->vm_end) /* handle wraparound */
1840@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1841 if (flags & MAP_FIXED)
1842 return addr;
1843 if (!addr)
1844- addr = TASK_UNMAPPED_BASE;
1845+ addr = current->mm->mmap_base;
1846
1847 if (filp) {
1848 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1849diff -urNp linux-2.6.32.42/arch/parisc/kernel/traps.c linux-2.6.32.42/arch/parisc/kernel/traps.c
1850--- linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1851+++ linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1852@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1853
1854 down_read(&current->mm->mmap_sem);
1855 vma = find_vma(current->mm,regs->iaoq[0]);
1856- if (vma && (regs->iaoq[0] >= vma->vm_start)
1857- && (vma->vm_flags & VM_EXEC)) {
1858-
1859+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1860 fault_address = regs->iaoq[0];
1861 fault_space = regs->iasq[0];
1862
1863diff -urNp linux-2.6.32.42/arch/parisc/mm/fault.c linux-2.6.32.42/arch/parisc/mm/fault.c
1864--- linux-2.6.32.42/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1865+++ linux-2.6.32.42/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1866@@ -15,6 +15,7 @@
1867 #include <linux/sched.h>
1868 #include <linux/interrupt.h>
1869 #include <linux/module.h>
1870+#include <linux/unistd.h>
1871
1872 #include <asm/uaccess.h>
1873 #include <asm/traps.h>
1874@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1875 static unsigned long
1876 parisc_acctyp(unsigned long code, unsigned int inst)
1877 {
1878- if (code == 6 || code == 16)
1879+ if (code == 6 || code == 7 || code == 16)
1880 return VM_EXEC;
1881
1882 switch (inst & 0xf0000000) {
1883@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1884 }
1885 #endif
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+/*
1889+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1890+ *
1891+ * returns 1 when task should be killed
1892+ * 2 when rt_sigreturn trampoline was detected
1893+ * 3 when unpatched PLT trampoline was detected
1894+ */
1895+static int pax_handle_fetch_fault(struct pt_regs *regs)
1896+{
1897+
1898+#ifdef CONFIG_PAX_EMUPLT
1899+ int err;
1900+
1901+ do { /* PaX: unpatched PLT emulation */
1902+ unsigned int bl, depwi;
1903+
1904+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1905+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1906+
1907+ if (err)
1908+ break;
1909+
1910+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1911+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1912+
1913+ err = get_user(ldw, (unsigned int *)addr);
1914+ err |= get_user(bv, (unsigned int *)(addr+4));
1915+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1916+
1917+ if (err)
1918+ break;
1919+
1920+ if (ldw == 0x0E801096U &&
1921+ bv == 0xEAC0C000U &&
1922+ ldw2 == 0x0E881095U)
1923+ {
1924+ unsigned int resolver, map;
1925+
1926+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1927+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1928+ if (err)
1929+ break;
1930+
1931+ regs->gr[20] = instruction_pointer(regs)+8;
1932+ regs->gr[21] = map;
1933+ regs->gr[22] = resolver;
1934+ regs->iaoq[0] = resolver | 3UL;
1935+ regs->iaoq[1] = regs->iaoq[0] + 4;
1936+ return 3;
1937+ }
1938+ }
1939+ } while (0);
1940+#endif
1941+
1942+#ifdef CONFIG_PAX_EMUTRAMP
1943+
1944+#ifndef CONFIG_PAX_EMUSIGRT
1945+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1946+ return 1;
1947+#endif
1948+
1949+ do { /* PaX: rt_sigreturn emulation */
1950+ unsigned int ldi1, ldi2, bel, nop;
1951+
1952+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1953+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1954+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1955+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1961+ ldi2 == 0x3414015AU &&
1962+ bel == 0xE4008200U &&
1963+ nop == 0x08000240U)
1964+ {
1965+ regs->gr[25] = (ldi1 & 2) >> 1;
1966+ regs->gr[20] = __NR_rt_sigreturn;
1967+ regs->gr[31] = regs->iaoq[1] + 16;
1968+ regs->sr[0] = regs->iasq[1];
1969+ regs->iaoq[0] = 0x100UL;
1970+ regs->iaoq[1] = regs->iaoq[0] + 4;
1971+ regs->iasq[0] = regs->sr[2];
1972+ regs->iasq[1] = regs->sr[2];
1973+ return 2;
1974+ }
1975+ } while (0);
1976+#endif
1977+
1978+ return 1;
1979+}
1980+
1981+void pax_report_insns(void *pc, void *sp)
1982+{
1983+ unsigned long i;
1984+
1985+ printk(KERN_ERR "PAX: bytes at PC: ");
1986+ for (i = 0; i < 5; i++) {
1987+ unsigned int c;
1988+ if (get_user(c, (unsigned int *)pc+i))
1989+ printk(KERN_CONT "???????? ");
1990+ else
1991+ printk(KERN_CONT "%08x ", c);
1992+ }
1993+ printk("\n");
1994+}
1995+#endif
1996+
1997 int fixup_exception(struct pt_regs *regs)
1998 {
1999 const struct exception_table_entry *fix;
2000@@ -192,8 +303,33 @@ good_area:
2001
2002 acc_type = parisc_acctyp(code,regs->iir);
2003
2004- if ((vma->vm_flags & acc_type) != acc_type)
2005+ if ((vma->vm_flags & acc_type) != acc_type) {
2006+
2007+#ifdef CONFIG_PAX_PAGEEXEC
2008+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2009+ (address & ~3UL) == instruction_pointer(regs))
2010+ {
2011+ up_read(&mm->mmap_sem);
2012+ switch (pax_handle_fetch_fault(regs)) {
2013+
2014+#ifdef CONFIG_PAX_EMUPLT
2015+ case 3:
2016+ return;
2017+#endif
2018+
2019+#ifdef CONFIG_PAX_EMUTRAMP
2020+ case 2:
2021+ return;
2022+#endif
2023+
2024+ }
2025+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2026+ do_group_exit(SIGKILL);
2027+ }
2028+#endif
2029+
2030 goto bad_area;
2031+ }
2032
2033 /*
2034 * If for any reason at all we couldn't handle the fault, make
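
The parisc pax_handle_fetch_fault() above follows the pattern PaX uses for all of its trampoline emulation: fetch the instruction words at the faulting PC with get_user(), compare them against a known stub signature, and if they match, perform the stub's effect in the kernel rather than executing it from a non-executable page. A stripped-down model of that pattern; only the two signature words and the return-code meanings (1 = kill, 3 = unpatched PLT) are taken from the hunk, everything else is a stand-in:

#include <stdbool.h>
#include <stdio.h>

static bool fetch_insn(const unsigned int *pc, unsigned int idx, unsigned int *out)
{
        /* stands in for get_user(); always succeeds in this model */
        *out = pc[idx];
        return true;
}

static int handle_fetch_fault(const unsigned int *pc)
{
        static const unsigned int sig[2] = { 0xEA9F1FDDU, 0xD6801C1EU }; /* from the hunk */
        unsigned int w0, w1;

        if (!fetch_insn(pc, 0, &w0) || !fetch_insn(pc, 1, &w1))
                return 1;                       /* unreadable: treat as an attack */
        if (w0 == sig[0] && w1 == sig[1])
                return 3;                       /* recognized PLT stub: emulate it */
        return 1;                               /* anything else: kill the task */
}

int main(void)
{
        unsigned int plt[2]  = { 0xEA9F1FDDU, 0xD6801C1EU };
        unsigned int junk[2] = { 0, 0 };

        printf("%d %d\n", handle_fetch_fault(plt), handle_fetch_fault(junk));
        return 0;
}
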
2035diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/device.h linux-2.6.32.42/arch/powerpc/include/asm/device.h
2036--- linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2037+++ linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2038@@ -14,7 +14,7 @@ struct dev_archdata {
2039 struct device_node *of_node;
2040
2041 /* DMA operations on that device */
2042- struct dma_map_ops *dma_ops;
2043+ const struct dma_map_ops *dma_ops;
2044
2045 /*
2046 * When an iommu is in use, dma_data is used as a ptr to the base of the
2047diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h
2048--- linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2049+++ linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2050@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2051 #ifdef CONFIG_PPC64
2052 extern struct dma_map_ops dma_iommu_ops;
2053 #endif
2054-extern struct dma_map_ops dma_direct_ops;
2055+extern const struct dma_map_ops dma_direct_ops;
2056
2057-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2058+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2059 {
2060 /* We don't handle the NULL dev case for ISA for now. We could
2061 * do it via an out of line call but it is not needed for now. The
2062@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2063 return dev->archdata.dma_ops;
2064 }
2065
2066-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2067+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2068 {
2069 dev->archdata.dma_ops = ops;
2070 }
2071@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2072
2073 static inline int dma_supported(struct device *dev, u64 mask)
2074 {
2075- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2076+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2077
2078 if (unlikely(dma_ops == NULL))
2079 return 0;
2080@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2081
2082 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2083 {
2084- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2085+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2086
2087 if (unlikely(dma_ops == NULL))
2088 return -EIO;
2089@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2090 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2091 dma_addr_t *dma_handle, gfp_t flag)
2092 {
2093- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2094+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2095 void *cpu_addr;
2096
2097 BUG_ON(!dma_ops);
2098@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2099 static inline void dma_free_coherent(struct device *dev, size_t size,
2100 void *cpu_addr, dma_addr_t dma_handle)
2101 {
2102- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2103+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2104
2105 BUG_ON(!dma_ops);
2106
2107@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2108
2109 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2110 {
2111- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2112+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2113
2114 if (dma_ops->mapping_error)
2115 return dma_ops->mapping_error(dev, dma_addr);
2116diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/elf.h linux-2.6.32.42/arch/powerpc/include/asm/elf.h
2117--- linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2118+++ linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2119@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2120 the loader. We need to make sure that it is out of the way of the program
2121 that it will "exec", and that there is sufficient room for the brk. */
2122
2123-extern unsigned long randomize_et_dyn(unsigned long base);
2124-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2125+#define ELF_ET_DYN_BASE (0x20000000)
2126+
2127+#ifdef CONFIG_PAX_ASLR
2128+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2129+
2130+#ifdef __powerpc64__
2131+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2132+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2133+#else
2134+#define PAX_DELTA_MMAP_LEN 15
2135+#define PAX_DELTA_STACK_LEN 15
2136+#endif
2137+#endif
2138
2139 /*
2140 * Our registers are always unsigned longs, whether we're a 32 bit
2141@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2142 (0x7ff >> (PAGE_SHIFT - 12)) : \
2143 (0x3ffff >> (PAGE_SHIFT - 12)))
2144
2145-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2146-#define arch_randomize_brk arch_randomize_brk
2147-
2148 #endif /* __KERNEL__ */
2149
2150 /*
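
One way to read the PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values introduced here and in the other elf.h hunks: assuming PaX treats them as the number of random bits applied at page granularity (that use is not shown in this section), a 16-bit delta with 4KB pages spreads the base over a 256MB window and a 28-bit delta over 1TB. Quick arithmetic check using the 15/16/28 values visible above:

#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12;               /* 4KB pages, illustrative */
        unsigned int deltas[] = { 15, 16, 28 };

        for (unsigned int i = 0; i < 3; i++) {
                unsigned long long span = 1ULL << (deltas[i] + page_shift);
                printf("delta %2u bits -> %llu MB of randomization\n",
                       deltas[i], span >> 20);
        }
        return 0;
}
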
2151diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/iommu.h linux-2.6.32.42/arch/powerpc/include/asm/iommu.h
2152--- linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2153+++ linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2154@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2155 extern void iommu_init_early_dart(void);
2156 extern void iommu_init_early_pasemi(void);
2157
2158+/* dma-iommu.c */
2159+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2160+
2161 #ifdef CONFIG_PCI
2162 extern void pci_iommu_init(void);
2163 extern void pci_direct_iommu_init(void);
2164diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h
2165--- linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2166+++ linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2167@@ -26,6 +26,7 @@ enum km_type {
2168 KM_SOFTIRQ1,
2169 KM_PPC_SYNC_PAGE,
2170 KM_PPC_SYNC_ICACHE,
2171+ KM_CLEARPAGE,
2172 KM_TYPE_NR
2173 };
2174
2175diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page_64.h linux-2.6.32.42/arch/powerpc/include/asm/page_64.h
2176--- linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2177+++ linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2178@@ -180,15 +180,18 @@ do { \
2179 * stack by default, so in the absense of a PT_GNU_STACK program header
2180 * we turn execute permission off.
2181 */
2182-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2183- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2184+#define VM_STACK_DEFAULT_FLAGS32 \
2185+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2186+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2187
2188 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2190
2191+#ifndef CONFIG_PAX_PAGEEXEC
2192 #define VM_STACK_DEFAULT_FLAGS \
2193 (test_thread_flag(TIF_32BIT) ? \
2194 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2195+#endif
2196
2197 #include <asm-generic/getorder.h>
2198
2199diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page.h linux-2.6.32.42/arch/powerpc/include/asm/page.h
2200--- linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2201+++ linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2202@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2203 * and needs to be executable. This means the whole heap ends
2204 * up being executable.
2205 */
2206-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2207- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2208+#define VM_DATA_DEFAULT_FLAGS32 \
2209+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2210+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2211
2212 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2213 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2214@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2215 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2216 #endif
2217
2218+#define ktla_ktva(addr) (addr)
2219+#define ktva_ktla(addr) (addr)
2220+
2221 #ifndef __ASSEMBLY__
2222
2223 #undef STRICT_MM_TYPECHECKS
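
The page_64.h and page.h hunks above stop granting VM_EXEC unconditionally on 32-bit stack and data mappings; the bit is now taken only when the task's personality carries READ_IMPLIES_EXEC. Minimal model of that selection (the flag values below match the mainline headers):

#include <stdio.h>

#define VM_READ   0x00000001UL
#define VM_WRITE  0x00000002UL
#define VM_EXEC   0x00000004UL
#define READ_IMPLIES_EXEC 0x0400000UL   /* as in linux/personality.h */

static unsigned long data_default_flags(unsigned long personality)
{
        return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
               VM_READ | VM_WRITE;
}

int main(void)
{
        printf("legacy: %#lx\n", data_default_flags(READ_IMPLIES_EXEC));
        printf("strict: %#lx\n", data_default_flags(0));
        return 0;
}
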
2224diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pci.h linux-2.6.32.42/arch/powerpc/include/asm/pci.h
2225--- linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2226+++ linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2227@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2228 }
2229
2230 #ifdef CONFIG_PCI
2231-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2232-extern struct dma_map_ops *get_pci_dma_ops(void);
2233+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2234+extern const struct dma_map_ops *get_pci_dma_ops(void);
2235 #else /* CONFIG_PCI */
2236 #define set_pci_dma_ops(d)
2237 #define get_pci_dma_ops() NULL
2238diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h
2239--- linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2240+++ linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2241@@ -2,6 +2,7 @@
2242 #define _ASM_POWERPC_PGTABLE_H
2243 #ifdef __KERNEL__
2244
2245+#include <linux/const.h>
2246 #ifndef __ASSEMBLY__
2247 #include <asm/processor.h> /* For TASK_SIZE */
2248 #include <asm/mmu.h>
2249diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h
2250--- linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2251+++ linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2252@@ -21,6 +21,7 @@
2253 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2254 #define _PAGE_USER 0x004 /* usermode access allowed */
2255 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2256+#define _PAGE_EXEC _PAGE_GUARDED
2257 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2258 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2259 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2260diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/reg.h linux-2.6.32.42/arch/powerpc/include/asm/reg.h
2261--- linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2262+++ linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2263@@ -191,6 +191,7 @@
2264 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2265 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2266 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2267+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2268 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2269 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2270 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2271diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h
2272--- linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2273+++ linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2274@@ -13,7 +13,7 @@
2275
2276 #include <linux/swiotlb.h>
2277
2278-extern struct dma_map_ops swiotlb_dma_ops;
2279+extern const struct dma_map_ops swiotlb_dma_ops;
2280
2281 static inline void dma_mark_clean(void *addr, size_t size) {}
2282
2283diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/system.h linux-2.6.32.42/arch/powerpc/include/asm/system.h
2284--- linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2285+++ linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2286@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2287 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2288 #endif
2289
2290-extern unsigned long arch_align_stack(unsigned long sp);
2291+#define arch_align_stack(x) ((x) & ~0xfUL)
2292
2293 /* Used in very early kernel initialization. */
2294 extern unsigned long reloc_offset(void);
2295diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h
2296--- linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2297+++ linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2298@@ -13,6 +13,8 @@
2299 #define VERIFY_READ 0
2300 #define VERIFY_WRITE 1
2301
2302+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2303+
2304 /*
2305 * The fs value determines whether argument validity checking should be
2306 * performed or not. If get_fs() == USER_DS, checking is performed, with
2307@@ -327,52 +329,6 @@ do { \
2308 extern unsigned long __copy_tofrom_user(void __user *to,
2309 const void __user *from, unsigned long size);
2310
2311-#ifndef __powerpc64__
2312-
2313-static inline unsigned long copy_from_user(void *to,
2314- const void __user *from, unsigned long n)
2315-{
2316- unsigned long over;
2317-
2318- if (access_ok(VERIFY_READ, from, n))
2319- return __copy_tofrom_user((__force void __user *)to, from, n);
2320- if ((unsigned long)from < TASK_SIZE) {
2321- over = (unsigned long)from + n - TASK_SIZE;
2322- return __copy_tofrom_user((__force void __user *)to, from,
2323- n - over) + over;
2324- }
2325- return n;
2326-}
2327-
2328-static inline unsigned long copy_to_user(void __user *to,
2329- const void *from, unsigned long n)
2330-{
2331- unsigned long over;
2332-
2333- if (access_ok(VERIFY_WRITE, to, n))
2334- return __copy_tofrom_user(to, (__force void __user *)from, n);
2335- if ((unsigned long)to < TASK_SIZE) {
2336- over = (unsigned long)to + n - TASK_SIZE;
2337- return __copy_tofrom_user(to, (__force void __user *)from,
2338- n - over) + over;
2339- }
2340- return n;
2341-}
2342-
2343-#else /* __powerpc64__ */
2344-
2345-#define __copy_in_user(to, from, size) \
2346- __copy_tofrom_user((to), (from), (size))
2347-
2348-extern unsigned long copy_from_user(void *to, const void __user *from,
2349- unsigned long n);
2350-extern unsigned long copy_to_user(void __user *to, const void *from,
2351- unsigned long n);
2352-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2353- unsigned long n);
2354-
2355-#endif /* __powerpc64__ */
2356-
2357 static inline unsigned long __copy_from_user_inatomic(void *to,
2358 const void __user *from, unsigned long n)
2359 {
2360@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2361 if (ret == 0)
2362 return 0;
2363 }
2364+
2365+ if (!__builtin_constant_p(n))
2366+ check_object_size(to, n, false);
2367+
2368 return __copy_tofrom_user((__force void __user *)to, from, n);
2369 }
2370
2371@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2372 if (ret == 0)
2373 return 0;
2374 }
2375+
2376+ if (!__builtin_constant_p(n))
2377+ check_object_size(from, n, true);
2378+
2379 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2380 }
2381
2382@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2383 return __copy_to_user_inatomic(to, from, size);
2384 }
2385
2386+#ifndef __powerpc64__
2387+
2388+static inline unsigned long __must_check copy_from_user(void *to,
2389+ const void __user *from, unsigned long n)
2390+{
2391+ unsigned long over;
2392+
2393+ if ((long)n < 0)
2394+ return n;
2395+
2396+ if (access_ok(VERIFY_READ, from, n)) {
2397+ if (!__builtin_constant_p(n))
2398+ check_object_size(to, n, false);
2399+ return __copy_tofrom_user((__force void __user *)to, from, n);
2400+ }
2401+ if ((unsigned long)from < TASK_SIZE) {
2402+ over = (unsigned long)from + n - TASK_SIZE;
2403+ if (!__builtin_constant_p(n - over))
2404+ check_object_size(to, n - over, false);
2405+ return __copy_tofrom_user((__force void __user *)to, from,
2406+ n - over) + over;
2407+ }
2408+ return n;
2409+}
2410+
2411+static inline unsigned long __must_check copy_to_user(void __user *to,
2412+ const void *from, unsigned long n)
2413+{
2414+ unsigned long over;
2415+
2416+ if ((long)n < 0)
2417+ return n;
2418+
2419+ if (access_ok(VERIFY_WRITE, to, n)) {
2420+ if (!__builtin_constant_p(n))
2421+ check_object_size(from, n, true);
2422+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2423+ }
2424+ if ((unsigned long)to < TASK_SIZE) {
2425+ over = (unsigned long)to + n - TASK_SIZE;
2426+ if (!__builtin_constant_p(n))
2427+ check_object_size(from, n - over, true);
2428+ return __copy_tofrom_user(to, (__force void __user *)from,
2429+ n - over) + over;
2430+ }
2431+ return n;
2432+}
2433+
2434+#else /* __powerpc64__ */
2435+
2436+#define __copy_in_user(to, from, size) \
2437+ __copy_tofrom_user((to), (from), (size))
2438+
2439+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2440+{
2441+ if ((long)n < 0 || n > INT_MAX)
2442+ return n;
2443+
2444+ if (!__builtin_constant_p(n))
2445+ check_object_size(to, n, false);
2446+
2447+ if (likely(access_ok(VERIFY_READ, from, n)))
2448+ n = __copy_from_user(to, from, n);
2449+ else
2450+ memset(to, 0, n);
2451+ return n;
2452+}
2453+
2454+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2455+{
2456+ if ((long)n < 0 || n > INT_MAX)
2457+ return n;
2458+
2459+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2460+ if (!__builtin_constant_p(n))
2461+ check_object_size(from, n, true);
2462+ n = __copy_to_user(to, from, n);
2463+ }
2464+ return n;
2465+}
2466+
2467+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2468+ unsigned long n);
2469+
2470+#endif /* __powerpc64__ */
2471+
2472 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2473
2474 static inline unsigned long clear_user(void __user *addr, unsigned long size)
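
The uaccess.h rework above moves copy_from_user()/copy_to_user() inline, adds the "(long)n < 0" length guard, and calls check_object_size() on non-constant lengths. check_object_size() is defined elsewhere in the patch; the user-space model below only assumes its job is to verify that the length stays within the bounds of the kernel object being copied, with a recorded buffer size standing in for that check:

#include <stdio.h>
#include <string.h>

struct object {
        char buf[64];
        size_t buf_size;
};

static size_t hardened_copy(struct object *dst, const char *src, size_t n)
{
        if ((long)n < 0)                /* reject sign-underflowed lengths */
                return n;
        if (n > dst->buf_size) {        /* stand-in for check_object_size() */
                fprintf(stderr, "copy of %zu bytes exceeds %zu-byte object\n",
                        n, dst->buf_size);
                return n;
        }
        memcpy(dst->buf, src, n);
        return 0;                       /* 0 bytes left uncopied */
}

int main(void)
{
        struct object o = { .buf_size = sizeof(o.buf) };
        char src[128] = "hello";

        printf("ok:   %zu left\n", hardened_copy(&o, src, 5));
        printf("oops: %zu left\n", hardened_copy(&o, src, sizeof(src)));
        return 0;
}
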
2475diff -urNp linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c
2476--- linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2477+++ linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2478@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2479 &cache_assoc_attr,
2480 };
2481
2482-static struct sysfs_ops cache_index_ops = {
2483+static const struct sysfs_ops cache_index_ops = {
2484 .show = cache_index_show,
2485 };
2486
2487diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma.c linux-2.6.32.42/arch/powerpc/kernel/dma.c
2488--- linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2489+++ linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2490@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2491 }
2492 #endif
2493
2494-struct dma_map_ops dma_direct_ops = {
2495+const struct dma_map_ops dma_direct_ops = {
2496 .alloc_coherent = dma_direct_alloc_coherent,
2497 .free_coherent = dma_direct_free_coherent,
2498 .map_sg = dma_direct_map_sg,
2499diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c
2500--- linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2501+++ linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2502@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2503 }
2504
2505 /* We support DMA to/from any memory page via the iommu */
2506-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2507+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2508 {
2509 struct iommu_table *tbl = get_iommu_table_base(dev);
2510
2511diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c
2512--- linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2513+++ linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2514@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2515 * map_page, and unmap_page on highmem, use normal dma_ops
2516 * for everything else.
2517 */
2518-struct dma_map_ops swiotlb_dma_ops = {
2519+const struct dma_map_ops swiotlb_dma_ops = {
2520 .alloc_coherent = dma_direct_alloc_coherent,
2521 .free_coherent = dma_direct_free_coherent,
2522 .map_sg = swiotlb_map_sg_attrs,
2523diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S
2524--- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2525+++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2526@@ -455,6 +455,7 @@ storage_fault_common:
2527 std r14,_DAR(r1)
2528 std r15,_DSISR(r1)
2529 addi r3,r1,STACK_FRAME_OVERHEAD
2530+ bl .save_nvgprs
2531 mr r4,r14
2532 mr r5,r15
2533 ld r14,PACA_EXGEN+EX_R14(r13)
2534@@ -464,8 +465,7 @@ storage_fault_common:
2535 cmpdi r3,0
2536 bne- 1f
2537 b .ret_from_except_lite
2538-1: bl .save_nvgprs
2539- mr r5,r3
2540+1: mr r5,r3
2541 addi r3,r1,STACK_FRAME_OVERHEAD
2542 ld r4,_DAR(r1)
2543 bl .bad_page_fault
2544diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S
2545--- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2546+++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2547@@ -818,10 +818,10 @@ handle_page_fault:
2548 11: ld r4,_DAR(r1)
2549 ld r5,_DSISR(r1)
2550 addi r3,r1,STACK_FRAME_OVERHEAD
2551+ bl .save_nvgprs
2552 bl .do_page_fault
2553 cmpdi r3,0
2554 beq+ 13f
2555- bl .save_nvgprs
2556 mr r5,r3
2557 addi r3,r1,STACK_FRAME_OVERHEAD
2558 lwz r4,_DAR(r1)
2559diff -urNp linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c
2560--- linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2561+++ linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2562@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2563 return 1;
2564 }
2565
2566-static struct dma_map_ops ibmebus_dma_ops = {
2567+static const struct dma_map_ops ibmebus_dma_ops = {
2568 .alloc_coherent = ibmebus_alloc_coherent,
2569 .free_coherent = ibmebus_free_coherent,
2570 .map_sg = ibmebus_map_sg,
2571diff -urNp linux-2.6.32.42/arch/powerpc/kernel/kgdb.c linux-2.6.32.42/arch/powerpc/kernel/kgdb.c
2572--- linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2573+++ linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2574@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2575 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2576 return 0;
2577
2578- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2579+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2580 regs->nip += 4;
2581
2582 return 1;
2583@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2584 /*
2585 * Global data
2586 */
2587-struct kgdb_arch arch_kgdb_ops = {
2588+const struct kgdb_arch arch_kgdb_ops = {
2589 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2590 };
2591
2592diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module_32.c linux-2.6.32.42/arch/powerpc/kernel/module_32.c
2593--- linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2594+++ linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2595@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2596 me->arch.core_plt_section = i;
2597 }
2598 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2599- printk("Module doesn't contain .plt or .init.plt sections.\n");
2600+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2601 return -ENOEXEC;
2602 }
2603
2604@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2605
2606 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2607 /* Init, or core PLT? */
2608- if (location >= mod->module_core
2609- && location < mod->module_core + mod->core_size)
2610+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2611+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2612 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2613- else
2614+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2615+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2616 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2617+ else {
2618+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2619+ return ~0UL;
2620+ }
2621
2622 /* Find this entry, or if that fails, the next avail. entry */
2623 while (entry->jump[0]) {
2624diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module.c linux-2.6.32.42/arch/powerpc/kernel/module.c
2625--- linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2626+++ linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2627@@ -31,11 +31,24 @@
2628
2629 LIST_HEAD(module_bug_list);
2630
2631+#ifdef CONFIG_PAX_KERNEXEC
2632 void *module_alloc(unsigned long size)
2633 {
2634 if (size == 0)
2635 return NULL;
2636
2637+ return vmalloc(size);
2638+}
2639+
2640+void *module_alloc_exec(unsigned long size)
2641+#else
2642+void *module_alloc(unsigned long size)
2643+#endif
2644+
2645+{
2646+ if (size == 0)
2647+ return NULL;
2648+
2649 return vmalloc_exec(size);
2650 }
2651
2652@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2653 vfree(module_region);
2654 }
2655
2656+#ifdef CONFIG_PAX_KERNEXEC
2657+void module_free_exec(struct module *mod, void *module_region)
2658+{
2659+ module_free(mod, module_region);
2660+}
2661+#endif
2662+
2663 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2664 const Elf_Shdr *sechdrs,
2665 const char *name)
2666diff -urNp linux-2.6.32.42/arch/powerpc/kernel/pci-common.c linux-2.6.32.42/arch/powerpc/kernel/pci-common.c
2667--- linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2668+++ linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2669@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2670 unsigned int ppc_pci_flags = 0;
2671
2672
2673-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2674+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2675
2676-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2677+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2678 {
2679 pci_dma_ops = dma_ops;
2680 }
2681
2682-struct dma_map_ops *get_pci_dma_ops(void)
2683+const struct dma_map_ops *get_pci_dma_ops(void)
2684 {
2685 return pci_dma_ops;
2686 }
2687diff -urNp linux-2.6.32.42/arch/powerpc/kernel/process.c linux-2.6.32.42/arch/powerpc/kernel/process.c
2688--- linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2689+++ linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2690@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2691 * Lookup NIP late so we have the best change of getting the
2692 * above info out without failing
2693 */
2694- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2695- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2696+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2697+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2698 #endif
2699 show_stack(current, (unsigned long *) regs->gpr[1]);
2700 if (!user_mode(regs))
2701@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2702 newsp = stack[0];
2703 ip = stack[STACK_FRAME_LR_SAVE];
2704 if (!firstframe || ip != lr) {
2705- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2706+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2707 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2708 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2709- printk(" (%pS)",
2710+ printk(" (%pA)",
2711 (void *)current->ret_stack[curr_frame].ret);
2712 curr_frame--;
2713 }
2714@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2715 struct pt_regs *regs = (struct pt_regs *)
2716 (sp + STACK_FRAME_OVERHEAD);
2717 lr = regs->link;
2718- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2719+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2720 regs->trap, (void *)regs->nip, (void *)lr);
2721 firstframe = 1;
2722 }
2723@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2724 }
2725
2726 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2727-
2728-unsigned long arch_align_stack(unsigned long sp)
2729-{
2730- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2731- sp -= get_random_int() & ~PAGE_MASK;
2732- return sp & ~0xf;
2733-}
2734-
2735-static inline unsigned long brk_rnd(void)
2736-{
2737- unsigned long rnd = 0;
2738-
2739- /* 8MB for 32bit, 1GB for 64bit */
2740- if (is_32bit_task())
2741- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2742- else
2743- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2744-
2745- return rnd << PAGE_SHIFT;
2746-}
2747-
2748-unsigned long arch_randomize_brk(struct mm_struct *mm)
2749-{
2750- unsigned long base = mm->brk;
2751- unsigned long ret;
2752-
2753-#ifdef CONFIG_PPC_STD_MMU_64
2754- /*
2755- * If we are using 1TB segments and we are allowed to randomise
2756- * the heap, we can put it above 1TB so it is backed by a 1TB
2757- * segment. Otherwise the heap will be in the bottom 1TB
2758- * which always uses 256MB segments and this may result in a
2759- * performance penalty.
2760- */
2761- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2762- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2763-#endif
2764-
2765- ret = PAGE_ALIGN(base + brk_rnd());
2766-
2767- if (ret < mm->brk)
2768- return mm->brk;
2769-
2770- return ret;
2771-}
2772-
2773-unsigned long randomize_et_dyn(unsigned long base)
2774-{
2775- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2776-
2777- if (ret < base)
2778- return base;
2779-
2780- return ret;
2781-}
2782diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_32.c linux-2.6.32.42/arch/powerpc/kernel/signal_32.c
2783--- linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2784+++ linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2785@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2786 /* Save user registers on the stack */
2787 frame = &rt_sf->uc.uc_mcontext;
2788 addr = frame;
2789- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2790+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 if (save_user_regs(regs, frame, 0, 1))
2792 goto badframe;
2793 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2794diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_64.c linux-2.6.32.42/arch/powerpc/kernel/signal_64.c
2795--- linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2796+++ linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2797@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2798 current->thread.fpscr.val = 0;
2799
2800 /* Set up to return from userspace. */
2801- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2802+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2803 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2804 } else {
2805 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2806diff -urNp linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c
2807--- linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2808+++ linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2809@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2810 if (oldlenp) {
2811 if (!error) {
2812 if (get_user(oldlen, oldlenp) ||
2813- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2814+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2815+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2816 error = -EFAULT;
2817 }
2818- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2819 }
2820 return error;
2821 }
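
The compat_sys_sysctl hunk above folds the copy_to_user() of tmp.__unused into the error check, so a faulting copy now yields -EFAULT instead of being silently dropped. The general pattern, in a trivial user-space form with stand-in names:

#include <stdbool.h>
#include <stdio.h>

#define EFAULT 14

/* pretend copy helper: returns the number of bytes NOT copied */
static unsigned long fake_copy_to_user(bool fail)
{
        return fail ? 4 : 0;
}

static int finish_syscall(bool copy_fails)
{
        int error = 0;

        if (fake_copy_to_user(copy_fails))      /* checked, not discarded */
                error = -EFAULT;
        return error;
}

int main(void)
{
        printf("%d %d\n", finish_syscall(false), finish_syscall(true));
        return 0;
}
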
2822diff -urNp linux-2.6.32.42/arch/powerpc/kernel/traps.c linux-2.6.32.42/arch/powerpc/kernel/traps.c
2823--- linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2825@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2826 static inline void pmac_backlight_unblank(void) { }
2827 #endif
2828
2829+extern void gr_handle_kernel_exploit(void);
2830+
2831 int die(const char *str, struct pt_regs *regs, long err)
2832 {
2833 static struct {
2834@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2835 if (panic_on_oops)
2836 panic("Fatal exception");
2837
2838+ gr_handle_kernel_exploit();
2839+
2840 oops_exit();
2841 do_exit(err);
2842
2843diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vdso.c linux-2.6.32.42/arch/powerpc/kernel/vdso.c
2844--- linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2845+++ linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2846@@ -36,6 +36,7 @@
2847 #include <asm/firmware.h>
2848 #include <asm/vdso.h>
2849 #include <asm/vdso_datapage.h>
2850+#include <asm/mman.h>
2851
2852 #include "setup.h"
2853
2854@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2855 vdso_base = VDSO32_MBASE;
2856 #endif
2857
2858- current->mm->context.vdso_base = 0;
2859+ current->mm->context.vdso_base = ~0UL;
2860
2861 /* vDSO has a problem and was disabled, just don't "enable" it for the
2862 * process
2863@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2864 vdso_base = get_unmapped_area(NULL, vdso_base,
2865 (vdso_pages << PAGE_SHIFT) +
2866 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2867- 0, 0);
2868+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2869 if (IS_ERR_VALUE(vdso_base)) {
2870 rc = vdso_base;
2871 goto fail_mmapsem;
2872diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vio.c linux-2.6.32.42/arch/powerpc/kernel/vio.c
2873--- linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2874+++ linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2875@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2876 vio_cmo_dealloc(viodev, alloc_size);
2877 }
2878
2879-struct dma_map_ops vio_dma_mapping_ops = {
2880+static const struct dma_map_ops vio_dma_mapping_ops = {
2881 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2882 .free_coherent = vio_dma_iommu_free_coherent,
2883 .map_sg = vio_dma_iommu_map_sg,
2884 .unmap_sg = vio_dma_iommu_unmap_sg,
2885+ .dma_supported = dma_iommu_dma_supported,
2886 .map_page = vio_dma_iommu_map_page,
2887 .unmap_page = vio_dma_iommu_unmap_page,
2888
2889@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2890
2891 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2892 {
2893- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2894 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2895 }
2896
2897diff -urNp linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c
2898--- linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2899+++ linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2900@@ -9,22 +9,6 @@
2901 #include <linux/module.h>
2902 #include <asm/uaccess.h>
2903
2904-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2905-{
2906- if (likely(access_ok(VERIFY_READ, from, n)))
2907- n = __copy_from_user(to, from, n);
2908- else
2909- memset(to, 0, n);
2910- return n;
2911-}
2912-
2913-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2914-{
2915- if (likely(access_ok(VERIFY_WRITE, to, n)))
2916- n = __copy_to_user(to, from, n);
2917- return n;
2918-}
2919-
2920 unsigned long copy_in_user(void __user *to, const void __user *from,
2921 unsigned long n)
2922 {
2923@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2924 return n;
2925 }
2926
2927-EXPORT_SYMBOL(copy_from_user);
2928-EXPORT_SYMBOL(copy_to_user);
2929 EXPORT_SYMBOL(copy_in_user);
2930
2931diff -urNp linux-2.6.32.42/arch/powerpc/mm/fault.c linux-2.6.32.42/arch/powerpc/mm/fault.c
2932--- linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2933+++ linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2934@@ -30,6 +30,10 @@
2935 #include <linux/kprobes.h>
2936 #include <linux/kdebug.h>
2937 #include <linux/perf_event.h>
2938+#include <linux/slab.h>
2939+#include <linux/pagemap.h>
2940+#include <linux/compiler.h>
2941+#include <linux/unistd.h>
2942
2943 #include <asm/firmware.h>
2944 #include <asm/page.h>
2945@@ -40,6 +44,7 @@
2946 #include <asm/uaccess.h>
2947 #include <asm/tlbflush.h>
2948 #include <asm/siginfo.h>
2949+#include <asm/ptrace.h>
2950
2951
2952 #ifdef CONFIG_KPROBES
2953@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2954 }
2955 #endif
2956
2957+#ifdef CONFIG_PAX_PAGEEXEC
2958+/*
2959+ * PaX: decide what to do with offenders (regs->nip = fault address)
2960+ *
2961+ * returns 1 when task should be killed
2962+ */
2963+static int pax_handle_fetch_fault(struct pt_regs *regs)
2964+{
2965+ return 1;
2966+}
2967+
2968+void pax_report_insns(void *pc, void *sp)
2969+{
2970+ unsigned long i;
2971+
2972+ printk(KERN_ERR "PAX: bytes at PC: ");
2973+ for (i = 0; i < 5; i++) {
2974+ unsigned int c;
2975+ if (get_user(c, (unsigned int __user *)pc+i))
2976+ printk(KERN_CONT "???????? ");
2977+ else
2978+ printk(KERN_CONT "%08x ", c);
2979+ }
2980+ printk("\n");
2981+}
2982+#endif
2983+
2984 /*
2985 * Check whether the instruction at regs->nip is a store using
2986 * an update addressing form which will update r1.
2987@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2988 * indicate errors in DSISR but can validly be set in SRR1.
2989 */
2990 if (trap == 0x400)
2991- error_code &= 0x48200000;
2992+ error_code &= 0x58200000;
2993 else
2994 is_write = error_code & DSISR_ISSTORE;
2995 #else
2996@@ -250,7 +282,7 @@ good_area:
2997 * "undefined". Of those that can be set, this is the only
2998 * one which seems bad.
2999 */
3000- if (error_code & 0x10000000)
3001+ if (error_code & DSISR_GUARDED)
3002 /* Guarded storage error. */
3003 goto bad_area;
3004 #endif /* CONFIG_8xx */
3005@@ -265,7 +297,7 @@ good_area:
3006 * processors use the same I/D cache coherency mechanism
3007 * as embedded.
3008 */
3009- if (error_code & DSISR_PROTFAULT)
3010+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3011 goto bad_area;
3012 #endif /* CONFIG_PPC_STD_MMU */
3013
3014@@ -335,6 +367,23 @@ bad_area:
3015 bad_area_nosemaphore:
3016 /* User mode accesses cause a SIGSEGV */
3017 if (user_mode(regs)) {
3018+
3019+#ifdef CONFIG_PAX_PAGEEXEC
3020+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3021+#ifdef CONFIG_PPC_STD_MMU
3022+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3023+#else
3024+ if (is_exec && regs->nip == address) {
3025+#endif
3026+ switch (pax_handle_fetch_fault(regs)) {
3027+ }
3028+
3029+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3030+ do_group_exit(SIGKILL);
3031+ }
3032+ }
3033+#endif
3034+
3035 _exception(SIGSEGV, regs, code, address);
3036 return 0;
3037 }
3038diff -urNp linux-2.6.32.42/arch/powerpc/mm/mmap_64.c linux-2.6.32.42/arch/powerpc/mm/mmap_64.c
3039--- linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3040+++ linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3041@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3042 */
3043 if (mmap_is_legacy()) {
3044 mm->mmap_base = TASK_UNMAPPED_BASE;
3045+
3046+#ifdef CONFIG_PAX_RANDMMAP
3047+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3048+ mm->mmap_base += mm->delta_mmap;
3049+#endif
3050+
3051 mm->get_unmapped_area = arch_get_unmapped_area;
3052 mm->unmap_area = arch_unmap_area;
3053 } else {
3054 mm->mmap_base = mmap_base();
3055+
3056+#ifdef CONFIG_PAX_RANDMMAP
3057+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3058+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3059+#endif
3060+
3061 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3062 mm->unmap_area = arch_unmap_area_topdown;
3063 }
3064diff -urNp linux-2.6.32.42/arch/powerpc/mm/slice.c linux-2.6.32.42/arch/powerpc/mm/slice.c
3065--- linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3066+++ linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3067@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3068 if ((mm->task_size - len) < addr)
3069 return 0;
3070 vma = find_vma(mm, addr);
3071- return (!vma || (addr + len) <= vma->vm_start);
3072+ return check_heap_stack_gap(vma, addr, len);
3073 }
3074
3075 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3076@@ -256,7 +256,7 @@ full_search:
3077 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3078 continue;
3079 }
3080- if (!vma || addr + len <= vma->vm_start) {
3081+ if (check_heap_stack_gap(vma, addr, len)) {
3082 /*
3083 * Remember the place where we stopped the search:
3084 */
3085@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3086 }
3087 }
3088
3089- addr = mm->mmap_base;
3090- while (addr > len) {
3091+ if (mm->mmap_base < len)
3092+ addr = -ENOMEM;
3093+ else
3094+ addr = mm->mmap_base - len;
3095+
3096+ while (!IS_ERR_VALUE(addr)) {
3097 /* Go down by chunk size */
3098- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3099+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3100
3101 /* Check for hit with different page size */
3102 mask = slice_range_to_mask(addr, len);
3103@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3104 * return with success:
3105 */
3106 vma = find_vma(mm, addr);
3107- if (!vma || (addr + len) <= vma->vm_start) {
3108+ if (check_heap_stack_gap(vma, addr, len)) {
3109 /* remember the address as a hint for next time */
3110 if (use_cache)
3111 mm->free_area_cache = addr;
3112@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3113 mm->cached_hole_size = vma->vm_start - addr;
3114
3115 /* try just below the current vma->vm_start */
3116- addr = vma->vm_start;
3117+ addr = skip_heap_stack_gap(vma, len);
3118 }
3119
3120 /*
3121@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3122 if (fixed && addr > (mm->task_size - len))
3123 return -EINVAL;
3124
3125+#ifdef CONFIG_PAX_RANDMMAP
3126+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3127+ addr = 0;
3128+#endif
3129+
3130 /* If hint, make sure it matches our alignment restrictions */
3131 if (!fixed && addr) {
3132 addr = _ALIGN_UP(addr, 1ul << pshift);
3133diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c
3134--- linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3135+++ linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3136@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3137 lite5200_pm_target_state = PM_SUSPEND_ON;
3138 }
3139
3140-static struct platform_suspend_ops lite5200_pm_ops = {
3141+static const struct platform_suspend_ops lite5200_pm_ops = {
3142 .valid = lite5200_pm_valid,
3143 .begin = lite5200_pm_begin,
3144 .prepare = lite5200_pm_prepare,
3145diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3146--- linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3147+++ linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3148@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3149 iounmap(mbar);
3150 }
3151
3152-static struct platform_suspend_ops mpc52xx_pm_ops = {
3153+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3154 .valid = mpc52xx_pm_valid,
3155 .prepare = mpc52xx_pm_prepare,
3156 .enter = mpc52xx_pm_enter,
3157diff -urNp linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c
3158--- linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3159+++ linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3160@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3161 return ret;
3162 }
3163
3164-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3165+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3166 .valid = mpc83xx_suspend_valid,
3167 .begin = mpc83xx_suspend_begin,
3168 .enter = mpc83xx_suspend_enter,
3169diff -urNp linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c
3170--- linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3171+++ linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3172@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3173
3174 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3175
3176-struct dma_map_ops dma_iommu_fixed_ops = {
3177+const struct dma_map_ops dma_iommu_fixed_ops = {
3178 .alloc_coherent = dma_fixed_alloc_coherent,
3179 .free_coherent = dma_fixed_free_coherent,
3180 .map_sg = dma_fixed_map_sg,
3181diff -urNp linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c
3182--- linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3183+++ linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3184@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3185 return mask >= DMA_BIT_MASK(32);
3186 }
3187
3188-static struct dma_map_ops ps3_sb_dma_ops = {
3189+static const struct dma_map_ops ps3_sb_dma_ops = {
3190 .alloc_coherent = ps3_alloc_coherent,
3191 .free_coherent = ps3_free_coherent,
3192 .map_sg = ps3_sb_map_sg,
3193@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3194 .unmap_page = ps3_unmap_page,
3195 };
3196
3197-static struct dma_map_ops ps3_ioc0_dma_ops = {
3198+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3199 .alloc_coherent = ps3_alloc_coherent,
3200 .free_coherent = ps3_free_coherent,
3201 .map_sg = ps3_ioc0_map_sg,
3202diff -urNp linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig
3203--- linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3204+++ linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3205@@ -2,6 +2,8 @@ config PPC_PSERIES
3206 depends on PPC64 && PPC_BOOK3S
3207 bool "IBM pSeries & new (POWER5-based) iSeries"
3208 select MPIC
3209+ select PCI_MSI
3210+ select XICS
3211 select PPC_I8259
3212 select PPC_RTAS
3213 select RTAS_ERROR_LOGGING
3214diff -urNp linux-2.6.32.42/arch/s390/include/asm/elf.h linux-2.6.32.42/arch/s390/include/asm/elf.h
3215--- linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3216+++ linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3217@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3218 that it will "exec", and that there is sufficient room for the brk. */
3219 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3220
3221+#ifdef CONFIG_PAX_ASLR
3222+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3223+
3224+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3225+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3226+#endif
3227+
3228 /* This yields a mask that user programs can use to figure out what
3229 instruction set this CPU supports. */
3230
3231diff -urNp linux-2.6.32.42/arch/s390/include/asm/setup.h linux-2.6.32.42/arch/s390/include/asm/setup.h
3232--- linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3233+++ linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3234@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3235 void detect_memory_layout(struct mem_chunk chunk[]);
3236
3237 #ifdef CONFIG_S390_SWITCH_AMODE
3238-extern unsigned int switch_amode;
3239+#define switch_amode (1)
3240 #else
3241 #define switch_amode (0)
3242 #endif
3243
3244 #ifdef CONFIG_S390_EXEC_PROTECT
3245-extern unsigned int s390_noexec;
3246+#define s390_noexec (1)
3247 #else
3248 #define s390_noexec (0)
3249 #endif
3250diff -urNp linux-2.6.32.42/arch/s390/include/asm/uaccess.h linux-2.6.32.42/arch/s390/include/asm/uaccess.h
3251--- linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3252+++ linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3253@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3254 copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 might_fault();
3257+
3258+ if ((long)n < 0)
3259+ return n;
3260+
3261 if (access_ok(VERIFY_WRITE, to, n))
3262 n = __copy_to_user(to, from, n);
3263 return n;
3264@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3265 static inline unsigned long __must_check
3266 __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 if (__builtin_constant_p(n) && (n <= 256))
3272 return uaccess.copy_from_user_small(n, from, to);
3273 else
3274@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3275 copy_from_user(void *to, const void __user *from, unsigned long n)
3276 {
3277 might_fault();
3278+
3279+ if ((long)n < 0)
3280+ return n;
3281+
3282 if (access_ok(VERIFY_READ, from, n))
3283 n = __copy_from_user(to, from, n);
3284 else
3285diff -urNp linux-2.6.32.42/arch/s390/Kconfig linux-2.6.32.42/arch/s390/Kconfig
3286--- linux-2.6.32.42/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3287+++ linux-2.6.32.42/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3288@@ -194,28 +194,26 @@ config AUDIT_ARCH
3289
3290 config S390_SWITCH_AMODE
3291 bool "Switch kernel/user addressing modes"
3292+ default y
3293 help
3294 This option allows to switch the addressing modes of kernel and user
3295- space. The kernel parameter switch_amode=on will enable this feature,
3296- default is disabled. Enabling this (via kernel parameter) on machines
3297- earlier than IBM System z9-109 EC/BC will reduce system performance.
3298+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3299+ will reduce system performance.
3300
3301 Note that this option will also be selected by selecting the execute
3302- protection option below. Enabling the execute protection via the
3303- noexec kernel parameter will also switch the addressing modes,
3304- independent of the switch_amode kernel parameter.
3305+ protection option below. Enabling the execute protection will also
3306+ switch the addressing modes, independent of this option.
3307
3308
3309 config S390_EXEC_PROTECT
3310 bool "Data execute protection"
3311+ default y
3312 select S390_SWITCH_AMODE
3313 help
3314 This option allows to enable a buffer overflow protection for user
3315 space programs and it also selects the addressing mode option above.
3316- The kernel parameter noexec=on will enable this feature and also
3317- switch the addressing modes, default is disabled. Enabling this (via
3318- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3319- will reduce system performance.
3320+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3321+ reduce system performance.
3322
3323 comment "Code generation options"
3324
3325diff -urNp linux-2.6.32.42/arch/s390/kernel/module.c linux-2.6.32.42/arch/s390/kernel/module.c
3326--- linux-2.6.32.42/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3327+++ linux-2.6.32.42/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3328@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3329
3330 /* Increase core size by size of got & plt and set start
3331 offsets for got and plt. */
3332- me->core_size = ALIGN(me->core_size, 4);
3333- me->arch.got_offset = me->core_size;
3334- me->core_size += me->arch.got_size;
3335- me->arch.plt_offset = me->core_size;
3336- me->core_size += me->arch.plt_size;
3337+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3338+ me->arch.got_offset = me->core_size_rw;
3339+ me->core_size_rw += me->arch.got_size;
3340+ me->arch.plt_offset = me->core_size_rx;
3341+ me->core_size_rx += me->arch.plt_size;
3342 return 0;
3343 }
3344
3345@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3346 if (info->got_initialized == 0) {
3347 Elf_Addr *gotent;
3348
3349- gotent = me->module_core + me->arch.got_offset +
3350+ gotent = me->module_core_rw + me->arch.got_offset +
3351 info->got_offset;
3352 *gotent = val;
3353 info->got_initialized = 1;
3354@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3355 else if (r_type == R_390_GOTENT ||
3356 r_type == R_390_GOTPLTENT)
3357 *(unsigned int *) loc =
3358- (val + (Elf_Addr) me->module_core - loc) >> 1;
3359+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3360 else if (r_type == R_390_GOT64 ||
3361 r_type == R_390_GOTPLT64)
3362 *(unsigned long *) loc = val;
3363@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3364 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3365 if (info->plt_initialized == 0) {
3366 unsigned int *ip;
3367- ip = me->module_core + me->arch.plt_offset +
3368+ ip = me->module_core_rx + me->arch.plt_offset +
3369 info->plt_offset;
3370 #ifndef CONFIG_64BIT
3371 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3372@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3373 val - loc + 0xffffUL < 0x1ffffeUL) ||
3374 (r_type == R_390_PLT32DBL &&
3375 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3376- val = (Elf_Addr) me->module_core +
3377+ val = (Elf_Addr) me->module_core_rx +
3378 me->arch.plt_offset +
3379 info->plt_offset;
3380 val += rela->r_addend - loc;
3381@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3382 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3383 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3384 val = val + rela->r_addend -
3385- ((Elf_Addr) me->module_core + me->arch.got_offset);
3386+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3387 if (r_type == R_390_GOTOFF16)
3388 *(unsigned short *) loc = val;
3389 else if (r_type == R_390_GOTOFF32)
3390@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3391 break;
3392 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3393 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3394- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3395+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3396 rela->r_addend - loc;
3397 if (r_type == R_390_GOTPC)
3398 *(unsigned int *) loc = val;
3399diff -urNp linux-2.6.32.42/arch/s390/kernel/setup.c linux-2.6.32.42/arch/s390/kernel/setup.c
3400--- linux-2.6.32.42/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3401+++ linux-2.6.32.42/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3402@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3403 early_param("mem", early_parse_mem);
3404
3405 #ifdef CONFIG_S390_SWITCH_AMODE
3406-unsigned int switch_amode = 0;
3407-EXPORT_SYMBOL_GPL(switch_amode);
3408-
3409 static int set_amode_and_uaccess(unsigned long user_amode,
3410 unsigned long user32_amode)
3411 {
3412@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3413 return 0;
3414 }
3415 }
3416-
3417-/*
3418- * Switch kernel/user addressing modes?
3419- */
3420-static int __init early_parse_switch_amode(char *p)
3421-{
3422- switch_amode = 1;
3423- return 0;
3424-}
3425-early_param("switch_amode", early_parse_switch_amode);
3426-
3427 #else /* CONFIG_S390_SWITCH_AMODE */
3428 static inline int set_amode_and_uaccess(unsigned long user_amode,
3429 unsigned long user32_amode)
3430@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3431 }
3432 #endif /* CONFIG_S390_SWITCH_AMODE */
3433
3434-#ifdef CONFIG_S390_EXEC_PROTECT
3435-unsigned int s390_noexec = 0;
3436-EXPORT_SYMBOL_GPL(s390_noexec);
3437-
3438-/*
3439- * Enable execute protection?
3440- */
3441-static int __init early_parse_noexec(char *p)
3442-{
3443- if (!strncmp(p, "off", 3))
3444- return 0;
3445- switch_amode = 1;
3446- s390_noexec = 1;
3447- return 0;
3448-}
3449-early_param("noexec", early_parse_noexec);
3450-#endif /* CONFIG_S390_EXEC_PROTECT */
3451-
3452 static void setup_addressing_mode(void)
3453 {
3454 if (s390_noexec) {
3455diff -urNp linux-2.6.32.42/arch/s390/mm/mmap.c linux-2.6.32.42/arch/s390/mm/mmap.c
3456--- linux-2.6.32.42/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3457+++ linux-2.6.32.42/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3458@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3459 */
3460 if (mmap_is_legacy()) {
3461 mm->mmap_base = TASK_UNMAPPED_BASE;
3462+
3463+#ifdef CONFIG_PAX_RANDMMAP
3464+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3465+ mm->mmap_base += mm->delta_mmap;
3466+#endif
3467+
3468 mm->get_unmapped_area = arch_get_unmapped_area;
3469 mm->unmap_area = arch_unmap_area;
3470 } else {
3471 mm->mmap_base = mmap_base();
3472+
3473+#ifdef CONFIG_PAX_RANDMMAP
3474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3475+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3476+#endif
3477+
3478 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3479 mm->unmap_area = arch_unmap_area_topdown;
3480 }
3481@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3482 */
3483 if (mmap_is_legacy()) {
3484 mm->mmap_base = TASK_UNMAPPED_BASE;
3485+
3486+#ifdef CONFIG_PAX_RANDMMAP
3487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3488+ mm->mmap_base += mm->delta_mmap;
3489+#endif
3490+
3491 mm->get_unmapped_area = s390_get_unmapped_area;
3492 mm->unmap_area = arch_unmap_area;
3493 } else {
3494 mm->mmap_base = mmap_base();
3495+
3496+#ifdef CONFIG_PAX_RANDMMAP
3497+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3498+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3499+#endif
3500+
3501 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3502 mm->unmap_area = arch_unmap_area_topdown;
3503 }
3504diff -urNp linux-2.6.32.42/arch/score/include/asm/system.h linux-2.6.32.42/arch/score/include/asm/system.h
3505--- linux-2.6.32.42/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3506+++ linux-2.6.32.42/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3507@@ -17,7 +17,7 @@ do { \
3508 #define finish_arch_switch(prev) do {} while (0)
3509
3510 typedef void (*vi_handler_t)(void);
3511-extern unsigned long arch_align_stack(unsigned long sp);
3512+#define arch_align_stack(x) (x)
3513
3514 #define mb() barrier()
3515 #define rmb() barrier()
3516diff -urNp linux-2.6.32.42/arch/score/kernel/process.c linux-2.6.32.42/arch/score/kernel/process.c
3517--- linux-2.6.32.42/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3518+++ linux-2.6.32.42/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3519@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3520
3521 return task_pt_regs(task)->cp0_epc;
3522 }
3523-
3524-unsigned long arch_align_stack(unsigned long sp)
3525-{
3526- return sp;
3527-}
3528diff -urNp linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c
3529--- linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3530+++ linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3531@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3532 return 0;
3533 }
3534
3535-static struct platform_suspend_ops hp6x0_pm_ops = {
3536+static const struct platform_suspend_ops hp6x0_pm_ops = {
3537 .enter = hp6x0_pm_enter,
3538 .valid = suspend_valid_only_mem,
3539 };
3540diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c
3541--- linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3542+++ linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3543@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3544 NULL,
3545 };
3546
3547-static struct sysfs_ops sq_sysfs_ops = {
3548+static const struct sysfs_ops sq_sysfs_ops = {
3549 .show = sq_sysfs_show,
3550 .store = sq_sysfs_store,
3551 };
3552diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c
3553--- linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3554+++ linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3555@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3556 return 0;
3557 }
3558
3559-static struct platform_suspend_ops sh_pm_ops = {
3560+static const struct platform_suspend_ops sh_pm_ops = {
3561 .enter = sh_pm_enter,
3562 .valid = suspend_valid_only_mem,
3563 };
3564diff -urNp linux-2.6.32.42/arch/sh/kernel/kgdb.c linux-2.6.32.42/arch/sh/kernel/kgdb.c
3565--- linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3566+++ linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3567@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3568 {
3569 }
3570
3571-struct kgdb_arch arch_kgdb_ops = {
3572+const struct kgdb_arch arch_kgdb_ops = {
3573 /* Breakpoint instruction: trapa #0x3c */
3574 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3575 .gdb_bpt_instr = { 0x3c, 0xc3 },
3576diff -urNp linux-2.6.32.42/arch/sh/mm/mmap.c linux-2.6.32.42/arch/sh/mm/mmap.c
3577--- linux-2.6.32.42/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3578+++ linux-2.6.32.42/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3579@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3580 addr = PAGE_ALIGN(addr);
3581
3582 vma = find_vma(mm, addr);
3583- if (TASK_SIZE - len >= addr &&
3584- (!vma || addr + len <= vma->vm_start))
3585+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3586 return addr;
3587 }
3588
3589@@ -106,7 +105,7 @@ full_search:
3590 }
3591 return -ENOMEM;
3592 }
3593- if (likely(!vma || addr + len <= vma->vm_start)) {
3594+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3595 /*
3596 * Remember the place where we stopped the search:
3597 */
3598@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3599 addr = PAGE_ALIGN(addr);
3600
3601 vma = find_vma(mm, addr);
3602- if (TASK_SIZE - len >= addr &&
3603- (!vma || addr + len <= vma->vm_start))
3604+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3605 return addr;
3606 }
3607
3608@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3609 /* make sure it can fit in the remaining address space */
3610 if (likely(addr > len)) {
3611 vma = find_vma(mm, addr-len);
3612- if (!vma || addr <= vma->vm_start) {
3613+ if (check_heap_stack_gap(vma, addr - len, len)) {
3614 /* remember the address as a hint for next time */
3615 return (mm->free_area_cache = addr-len);
3616 }
3617@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3618 if (unlikely(mm->mmap_base < len))
3619 goto bottomup;
3620
3621- addr = mm->mmap_base-len;
3622- if (do_colour_align)
3623- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3624+ addr = mm->mmap_base - len;
3625
3626 do {
3627+ if (do_colour_align)
3628+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3629 /*
3630 * Lookup failure means no vma is above this address,
3631 * else if new region fits below vma->vm_start,
3632 * return with success:
3633 */
3634 vma = find_vma(mm, addr);
3635- if (likely(!vma || addr+len <= vma->vm_start)) {
3636+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3637 /* remember the address as a hint for next time */
3638 return (mm->free_area_cache = addr);
3639 }
3640@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3641 mm->cached_hole_size = vma->vm_start - addr;
3642
3643 /* try just below the current vma->vm_start */
3644- addr = vma->vm_start-len;
3645- if (do_colour_align)
3646- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3647- } while (likely(len < vma->vm_start));
3648+ addr = skip_heap_stack_gap(vma, len);
3649+ } while (!IS_ERR_VALUE(addr));
3650
3651 bottomup:
3652 /*
3653diff -urNp linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h
3654--- linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3655+++ linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3656@@ -14,18 +14,40 @@
3657 #define ATOMIC64_INIT(i) { (i) }
3658
3659 #define atomic_read(v) ((v)->counter)
3660+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3661+{
3662+ return v->counter;
3663+}
3664 #define atomic64_read(v) ((v)->counter)
3665+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3666+{
3667+ return v->counter;
3668+}
3669
3670 #define atomic_set(v, i) (((v)->counter) = i)
3671+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3672+{
3673+ v->counter = i;
3674+}
3675 #define atomic64_set(v, i) (((v)->counter) = i)
3676+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3677+{
3678+ v->counter = i;
3679+}
3680
3681 extern void atomic_add(int, atomic_t *);
3682+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3683 extern void atomic64_add(long, atomic64_t *);
3684+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3685 extern void atomic_sub(int, atomic_t *);
3686+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3687 extern void atomic64_sub(long, atomic64_t *);
3688+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3689
3690 extern int atomic_add_ret(int, atomic_t *);
3691+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3692 extern long atomic64_add_ret(long, atomic64_t *);
3693+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3694 extern int atomic_sub_ret(int, atomic_t *);
3695 extern long atomic64_sub_ret(long, atomic64_t *);
3696
3697@@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3698 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3699
3700 #define atomic_inc_return(v) atomic_add_ret(1, v)
3701+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3702+{
3703+ return atomic_add_ret_unchecked(1, v);
3704+}
3705 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3706+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3707+{
3708+ return atomic64_add_ret_unchecked(1, v);
3709+}
3710
3711 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3712 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3713@@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3714 * other cases.
3715 */
3716 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3717+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3718 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3719
3720 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3721@@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3722 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3723
3724 #define atomic_inc(v) atomic_add(1, v)
3725+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3726+{
3727+ atomic_add_unchecked(1, v);
3728+}
3729 #define atomic64_inc(v) atomic64_add(1, v)
3730+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3731+{
3732+ atomic64_add_unchecked(1, v);
3733+}
3734
3735 #define atomic_dec(v) atomic_sub(1, v)
3736+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3737+{
3738+ atomic_sub_unchecked(1, v);
3739+}
3740 #define atomic64_dec(v) atomic64_sub(1, v)
3741+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3742+{
3743+ atomic64_sub_unchecked(1, v);
3744+}
3745
3746 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3747 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3748
3749 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3750+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3751 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3752+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3753
3754 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3755 {
3756- int c, old;
3757+ int c, old, new;
3758 c = atomic_read(v);
3759 for (;;) {
3760- if (unlikely(c == (u)))
3761+ if (unlikely(c == u))
3762 break;
3763- old = atomic_cmpxchg((v), c, c + (a));
3764+
3765+ asm volatile("addcc %2, %0, %0\n"
3766+
3767+#ifdef CONFIG_PAX_REFCOUNT
3768+ "tvs %%icc, 6\n"
3769+#endif
3770+
3771+ : "=r" (new)
3772+ : "0" (c), "ir" (a)
3773+ : "cc");
3774+
3775+ old = atomic_cmpxchg(v, c, new);
3776 if (likely(old == c))
3777 break;
3778 c = old;
3779 }
3780- return c != (u);
3781+ return c != u;
3782 }
3783
3784 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3785@@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3786
3787 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3788 {
3789- long c, old;
3790+ long c, old, new;
3791 c = atomic64_read(v);
3792 for (;;) {
3793- if (unlikely(c == (u)))
3794+ if (unlikely(c == u))
3795 break;
3796- old = atomic64_cmpxchg((v), c, c + (a));
3797+
3798+ asm volatile("addcc %2, %0, %0\n"
3799+
3800+#ifdef CONFIG_PAX_REFCOUNT
3801+ "tvs %%xcc, 6\n"
3802+#endif
3803+
3804+ : "=r" (new)
3805+ : "0" (c), "ir" (a)
3806+ : "cc");
3807+
3808+ old = atomic64_cmpxchg(v, c, new);
3809 if (likely(old == c))
3810 break;
3811 c = old;
3812 }
3813- return c != (u);
3814+ return c != u;
3815 }
3816
3817 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3818diff -urNp linux-2.6.32.42/arch/sparc/include/asm/cache.h linux-2.6.32.42/arch/sparc/include/asm/cache.h
3819--- linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3820+++ linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3821@@ -8,7 +8,7 @@
3822 #define _SPARC_CACHE_H
3823
3824 #define L1_CACHE_SHIFT 5
3825-#define L1_CACHE_BYTES 32
3826+#define L1_CACHE_BYTES 32UL
3827 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3828
3829 #ifdef CONFIG_SPARC32
3830diff -urNp linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h
3831--- linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3832+++ linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3833@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3834 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3835 #define dma_is_consistent(d, h) (1)
3836
3837-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3838+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3839 extern struct bus_type pci_bus_type;
3840
3841-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3842+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3843 {
3844 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3845 if (dev->bus == &pci_bus_type)
3846@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3847 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3848 dma_addr_t *dma_handle, gfp_t flag)
3849 {
3850- struct dma_map_ops *ops = get_dma_ops(dev);
3851+ const struct dma_map_ops *ops = get_dma_ops(dev);
3852 void *cpu_addr;
3853
3854 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3855@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3856 static inline void dma_free_coherent(struct device *dev, size_t size,
3857 void *cpu_addr, dma_addr_t dma_handle)
3858 {
3859- struct dma_map_ops *ops = get_dma_ops(dev);
3860+ const struct dma_map_ops *ops = get_dma_ops(dev);
3861
3862 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3863 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3864diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_32.h linux-2.6.32.42/arch/sparc/include/asm/elf_32.h
3865--- linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3866+++ linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3867@@ -116,6 +116,13 @@ typedef struct {
3868
3869 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3870
3871+#ifdef CONFIG_PAX_ASLR
3872+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3873+
3874+#define PAX_DELTA_MMAP_LEN 16
3875+#define PAX_DELTA_STACK_LEN 16
3876+#endif
3877+
3878 /* This yields a mask that user programs can use to figure out what
3879 instruction set this cpu supports. This can NOT be done in userspace
3880 on Sparc. */
3881diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_64.h linux-2.6.32.42/arch/sparc/include/asm/elf_64.h
3882--- linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3883+++ linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3884@@ -163,6 +163,12 @@ typedef struct {
3885 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3886 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3887
3888+#ifdef CONFIG_PAX_ASLR
3889+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3890+
3891+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3892+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3893+#endif
3894
3895 /* This yields a mask that user programs can use to figure out what
3896 instruction set this cpu supports. */
3897diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h
3898--- linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3899+++ linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3900@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3901 BTFIXUPDEF_INT(page_none)
3902 BTFIXUPDEF_INT(page_copy)
3903 BTFIXUPDEF_INT(page_readonly)
3904+
3905+#ifdef CONFIG_PAX_PAGEEXEC
3906+BTFIXUPDEF_INT(page_shared_noexec)
3907+BTFIXUPDEF_INT(page_copy_noexec)
3908+BTFIXUPDEF_INT(page_readonly_noexec)
3909+#endif
3910+
3911 BTFIXUPDEF_INT(page_kernel)
3912
3913 #define PMD_SHIFT SUN4C_PMD_SHIFT
3914@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3915 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3916 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3917
3918+#ifdef CONFIG_PAX_PAGEEXEC
3919+extern pgprot_t PAGE_SHARED_NOEXEC;
3920+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3921+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3922+#else
3923+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3924+# define PAGE_COPY_NOEXEC PAGE_COPY
3925+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3926+#endif
3927+
3928 extern unsigned long page_kernel;
3929
3930 #ifdef MODULE
3931diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h
3932--- linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3933+++ linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3934@@ -115,6 +115,13 @@
3935 SRMMU_EXEC | SRMMU_REF)
3936 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3937 SRMMU_EXEC | SRMMU_REF)
3938+
3939+#ifdef CONFIG_PAX_PAGEEXEC
3940+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3941+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3942+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3943+#endif
3944+
3945 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3946 SRMMU_DIRTY | SRMMU_REF)
3947
3948diff -urNp linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h
3949--- linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3950+++ linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3951@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3952
3953 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3954
3955-static void inline arch_read_lock(raw_rwlock_t *lock)
3956+static inline void arch_read_lock(raw_rwlock_t *lock)
3957 {
3958 unsigned long tmp1, tmp2;
3959
3960 __asm__ __volatile__ (
3961 "1: ldsw [%2], %0\n"
3962 " brlz,pn %0, 2f\n"
3963-"4: add %0, 1, %1\n"
3964+"4: addcc %0, 1, %1\n"
3965+
3966+#ifdef CONFIG_PAX_REFCOUNT
3967+" tvs %%icc, 6\n"
3968+#endif
3969+
3970 " cas [%2], %0, %1\n"
3971 " cmp %0, %1\n"
3972 " bne,pn %%icc, 1b\n"
3973@@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3974 " .previous"
3975 : "=&r" (tmp1), "=&r" (tmp2)
3976 : "r" (lock)
3977- : "memory");
3978+ : "memory", "cc");
3979 }
3980
3981 static int inline arch_read_trylock(raw_rwlock_t *lock)
3982@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3983 "1: ldsw [%2], %0\n"
3984 " brlz,a,pn %0, 2f\n"
3985 " mov 0, %0\n"
3986-" add %0, 1, %1\n"
3987+" addcc %0, 1, %1\n"
3988+
3989+#ifdef CONFIG_PAX_REFCOUNT
3990+" tvs %%icc, 6\n"
3991+#endif
3992+
3993 " cas [%2], %0, %1\n"
3994 " cmp %0, %1\n"
3995 " bne,pn %%icc, 1b\n"
3996@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3997 return tmp1;
3998 }
3999
4000-static void inline arch_read_unlock(raw_rwlock_t *lock)
4001+static inline void arch_read_unlock(raw_rwlock_t *lock)
4002 {
4003 unsigned long tmp1, tmp2;
4004
4005 __asm__ __volatile__(
4006 "1: lduw [%2], %0\n"
4007-" sub %0, 1, %1\n"
4008+" subcc %0, 1, %1\n"
4009+
4010+#ifdef CONFIG_PAX_REFCOUNT
4011+" tvs %%icc, 6\n"
4012+#endif
4013+
4014 " cas [%2], %0, %1\n"
4015 " cmp %0, %1\n"
4016 " bne,pn %%xcc, 1b\n"
4017@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4018 : "memory");
4019 }
4020
4021-static void inline arch_write_lock(raw_rwlock_t *lock)
4022+static inline void arch_write_lock(raw_rwlock_t *lock)
4023 {
4024 unsigned long mask, tmp1, tmp2;
4025
4026@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4027 : "memory");
4028 }
4029
4030-static void inline arch_write_unlock(raw_rwlock_t *lock)
4031+static inline void arch_write_unlock(raw_rwlock_t *lock)
4032 {
4033 __asm__ __volatile__(
4034 " stw %%g0, [%0]"
4035diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h
4036--- linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4037+++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4038@@ -50,6 +50,8 @@ struct thread_info {
4039 unsigned long w_saved;
4040
4041 struct restart_block restart_block;
4042+
4043+ unsigned long lowest_stack;
4044 };
4045
4046 /*
4047diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h
4048--- linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4049+++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4050@@ -68,6 +68,8 @@ struct thread_info {
4051 struct pt_regs *kern_una_regs;
4052 unsigned int kern_una_insn;
4053
4054+ unsigned long lowest_stack;
4055+
4056 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4057 };
4058
4059diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h
4060--- linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4061+++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4062@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4063
4064 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4065 {
4066- if (n && __access_ok((unsigned long) to, n))
4067+ if ((long)n < 0)
4068+ return n;
4069+
4070+ if (n && __access_ok((unsigned long) to, n)) {
4071+ if (!__builtin_constant_p(n))
4072+ check_object_size(from, n, true);
4073 return __copy_user(to, (__force void __user *) from, n);
4074- else
4075+ } else
4076 return n;
4077 }
4078
4079 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4080 {
4081+ if ((long)n < 0)
4082+ return n;
4083+
4084+ if (!__builtin_constant_p(n))
4085+ check_object_size(from, n, true);
4086+
4087 return __copy_user(to, (__force void __user *) from, n);
4088 }
4089
4090 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4091 {
4092- if (n && __access_ok((unsigned long) from, n))
4093+ if ((long)n < 0)
4094+ return n;
4095+
4096+ if (n && __access_ok((unsigned long) from, n)) {
4097+ if (!__builtin_constant_p(n))
4098+ check_object_size(to, n, false);
4099 return __copy_user((__force void __user *) to, from, n);
4100- else
4101+ } else
4102 return n;
4103 }
4104
4105 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4106 {
4107+ if ((long)n < 0)
4108+ return n;
4109+
4110 return __copy_user((__force void __user *) to, from, n);
4111 }
4112
4113diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h
4114--- linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4115+++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4116@@ -9,6 +9,7 @@
4117 #include <linux/compiler.h>
4118 #include <linux/string.h>
4119 #include <linux/thread_info.h>
4120+#include <linux/kernel.h>
4121 #include <asm/asi.h>
4122 #include <asm/system.h>
4123 #include <asm/spitfire.h>
4124@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4125 static inline unsigned long __must_check
4126 copy_from_user(void *to, const void __user *from, unsigned long size)
4127 {
4128- unsigned long ret = ___copy_from_user(to, from, size);
4129+ unsigned long ret;
4130
4131+ if ((long)size < 0 || size > INT_MAX)
4132+ return size;
4133+
4134+ if (!__builtin_constant_p(size))
4135+ check_object_size(to, size, false);
4136+
4137+ ret = ___copy_from_user(to, from, size);
4138 if (unlikely(ret))
4139 ret = copy_from_user_fixup(to, from, size);
4140 return ret;
4141@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4142 static inline unsigned long __must_check
4143 copy_to_user(void __user *to, const void *from, unsigned long size)
4144 {
4145- unsigned long ret = ___copy_to_user(to, from, size);
4146+ unsigned long ret;
4147+
4148+ if ((long)size < 0 || size > INT_MAX)
4149+ return size;
4150+
4151+ if (!__builtin_constant_p(size))
4152+ check_object_size(from, size, true);
4153
4154+ ret = ___copy_to_user(to, from, size);
4155 if (unlikely(ret))
4156 ret = copy_to_user_fixup(to, from, size);
4157 return ret;
4158diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess.h linux-2.6.32.42/arch/sparc/include/asm/uaccess.h
4159--- linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4160+++ linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4161@@ -1,5 +1,13 @@
4162 #ifndef ___ASM_SPARC_UACCESS_H
4163 #define ___ASM_SPARC_UACCESS_H
4164+
4165+#ifdef __KERNEL__
4166+#ifndef __ASSEMBLY__
4167+#include <linux/types.h>
4168+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4169+#endif
4170+#endif
4171+
4172 #if defined(__sparc__) && defined(__arch64__)
4173 #include <asm/uaccess_64.h>
4174 #else
4175diff -urNp linux-2.6.32.42/arch/sparc/kernel/iommu.c linux-2.6.32.42/arch/sparc/kernel/iommu.c
4176--- linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4177+++ linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4178@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4179 spin_unlock_irqrestore(&iommu->lock, flags);
4180 }
4181
4182-static struct dma_map_ops sun4u_dma_ops = {
4183+static const struct dma_map_ops sun4u_dma_ops = {
4184 .alloc_coherent = dma_4u_alloc_coherent,
4185 .free_coherent = dma_4u_free_coherent,
4186 .map_page = dma_4u_map_page,
4187@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4188 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4189 };
4190
4191-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4192+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4193 EXPORT_SYMBOL(dma_ops);
4194
4195 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4196diff -urNp linux-2.6.32.42/arch/sparc/kernel/ioport.c linux-2.6.32.42/arch/sparc/kernel/ioport.c
4197--- linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4198+++ linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4199@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4200 BUG();
4201 }
4202
4203-struct dma_map_ops sbus_dma_ops = {
4204+const struct dma_map_ops sbus_dma_ops = {
4205 .alloc_coherent = sbus_alloc_coherent,
4206 .free_coherent = sbus_free_coherent,
4207 .map_page = sbus_map_page,
4208@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4209 .sync_sg_for_device = sbus_sync_sg_for_device,
4210 };
4211
4212-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4213+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4214 EXPORT_SYMBOL(dma_ops);
4215
4216 static int __init sparc_register_ioport(void)
4217@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4218 }
4219 }
4220
4221-struct dma_map_ops pci32_dma_ops = {
4222+const struct dma_map_ops pci32_dma_ops = {
4223 .alloc_coherent = pci32_alloc_coherent,
4224 .free_coherent = pci32_free_coherent,
4225 .map_page = pci32_map_page,
4226diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c
4227--- linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4228+++ linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4229@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4230 {
4231 }
4232
4233-struct kgdb_arch arch_kgdb_ops = {
4234+const struct kgdb_arch arch_kgdb_ops = {
4235 /* Breakpoint instruction: ta 0x7d */
4236 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4237 };
4238diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c
4239--- linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4240+++ linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4241@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4242 {
4243 }
4244
4245-struct kgdb_arch arch_kgdb_ops = {
4246+const struct kgdb_arch arch_kgdb_ops = {
4247 /* Breakpoint instruction: ta 0x72 */
4248 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4249 };
4250diff -urNp linux-2.6.32.42/arch/sparc/kernel/Makefile linux-2.6.32.42/arch/sparc/kernel/Makefile
4251--- linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4252+++ linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4253@@ -3,7 +3,7 @@
4254 #
4255
4256 asflags-y := -ansi
4257-ccflags-y := -Werror
4258+#ccflags-y := -Werror
4259
4260 extra-y := head_$(BITS).o
4261 extra-y += init_task.o
4262diff -urNp linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c
4263--- linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4264+++ linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4265@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4266 spin_unlock_irqrestore(&iommu->lock, flags);
4267 }
4268
4269-static struct dma_map_ops sun4v_dma_ops = {
4270+static const struct dma_map_ops sun4v_dma_ops = {
4271 .alloc_coherent = dma_4v_alloc_coherent,
4272 .free_coherent = dma_4v_free_coherent,
4273 .map_page = dma_4v_map_page,
4274diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_32.c linux-2.6.32.42/arch/sparc/kernel/process_32.c
4275--- linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4276+++ linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4277@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4278 rw->ins[4], rw->ins[5],
4279 rw->ins[6],
4280 rw->ins[7]);
4281- printk("%pS\n", (void *) rw->ins[7]);
4282+ printk("%pA\n", (void *) rw->ins[7]);
4283 rw = (struct reg_window32 *) rw->ins[6];
4284 }
4285 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4286@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4287
4288 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4289 r->psr, r->pc, r->npc, r->y, print_tainted());
4290- printk("PC: <%pS>\n", (void *) r->pc);
4291+ printk("PC: <%pA>\n", (void *) r->pc);
4292 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4293 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4294 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4295 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4296 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4297 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4298- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4299+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4300
4301 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4302 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4303@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4304 rw = (struct reg_window32 *) fp;
4305 pc = rw->ins[7];
4306 printk("[%08lx : ", pc);
4307- printk("%pS ] ", (void *) pc);
4308+ printk("%pA ] ", (void *) pc);
4309 fp = rw->ins[6];
4310 } while (++count < 16);
4311 printk("\n");
4312diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_64.c linux-2.6.32.42/arch/sparc/kernel/process_64.c
4313--- linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4314+++ linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4315@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4316 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4317 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4318 if (regs->tstate & TSTATE_PRIV)
4319- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4320+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4321 }
4322
4323 void show_regs(struct pt_regs *regs)
4324 {
4325 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4326 regs->tpc, regs->tnpc, regs->y, print_tainted());
4327- printk("TPC: <%pS>\n", (void *) regs->tpc);
4328+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4329 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4330 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4331 regs->u_regs[3]);
4332@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4333 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4334 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4335 regs->u_regs[15]);
4336- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4337+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4338 show_regwindow(regs);
4339 }
4340
4341@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4342 ((tp && tp->task) ? tp->task->pid : -1));
4343
4344 if (gp->tstate & TSTATE_PRIV) {
4345- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4346+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4347 (void *) gp->tpc,
4348 (void *) gp->o7,
4349 (void *) gp->i7,
4350diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c
4351--- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4352+++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4353@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4354 if (ARCH_SUN4C && len > 0x20000000)
4355 return -ENOMEM;
4356 if (!addr)
4357- addr = TASK_UNMAPPED_BASE;
4358+ addr = current->mm->mmap_base;
4359
4360 if (flags & MAP_SHARED)
4361 addr = COLOUR_ALIGN(addr);
4362@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4363 }
4364 if (TASK_SIZE - PAGE_SIZE - len < addr)
4365 return -ENOMEM;
4366- if (!vmm || addr + len <= vmm->vm_start)
4367+ if (check_heap_stack_gap(vmm, addr, len))
4368 return addr;
4369 addr = vmm->vm_end;
4370 if (flags & MAP_SHARED)
4371diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c
4372--- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4373+++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4374@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4375 /* We do not accept a shared mapping if it would violate
4376 * cache aliasing constraints.
4377 */
4378- if ((flags & MAP_SHARED) &&
4379+ if ((filp || (flags & MAP_SHARED)) &&
4380 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4381 return -EINVAL;
4382 return addr;
4383@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4384 if (filp || (flags & MAP_SHARED))
4385 do_color_align = 1;
4386
4387+#ifdef CONFIG_PAX_RANDMMAP
4388+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4389+#endif
4390+
4391 if (addr) {
4392 if (do_color_align)
4393 addr = COLOUR_ALIGN(addr, pgoff);
4394@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4395 addr = PAGE_ALIGN(addr);
4396
4397 vma = find_vma(mm, addr);
4398- if (task_size - len >= addr &&
4399- (!vma || addr + len <= vma->vm_start))
4400+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4401 return addr;
4402 }
4403
4404 if (len > mm->cached_hole_size) {
4405- start_addr = addr = mm->free_area_cache;
4406+ start_addr = addr = mm->free_area_cache;
4407 } else {
4408- start_addr = addr = TASK_UNMAPPED_BASE;
4409+ start_addr = addr = mm->mmap_base;
4410 mm->cached_hole_size = 0;
4411 }
4412
4413@@ -175,14 +178,14 @@ full_search:
4414 vma = find_vma(mm, VA_EXCLUDE_END);
4415 }
4416 if (unlikely(task_size < addr)) {
4417- if (start_addr != TASK_UNMAPPED_BASE) {
4418- start_addr = addr = TASK_UNMAPPED_BASE;
4419+ if (start_addr != mm->mmap_base) {
4420+ start_addr = addr = mm->mmap_base;
4421 mm->cached_hole_size = 0;
4422 goto full_search;
4423 }
4424 return -ENOMEM;
4425 }
4426- if (likely(!vma || addr + len <= vma->vm_start)) {
4427+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4428 /*
4429 * Remember the place where we stopped the search:
4430 */
4431@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4432 /* We do not accept a shared mapping if it would violate
4433 * cache aliasing constraints.
4434 */
4435- if ((flags & MAP_SHARED) &&
4436+ if ((filp || (flags & MAP_SHARED)) &&
4437 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4438 return -EINVAL;
4439 return addr;
4440@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4441 addr = PAGE_ALIGN(addr);
4442
4443 vma = find_vma(mm, addr);
4444- if (task_size - len >= addr &&
4445- (!vma || addr + len <= vma->vm_start))
4446+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4447 return addr;
4448 }
4449
4450@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4451 /* make sure it can fit in the remaining address space */
4452 if (likely(addr > len)) {
4453 vma = find_vma(mm, addr-len);
4454- if (!vma || addr <= vma->vm_start) {
4455+ if (check_heap_stack_gap(vma, addr - len, len)) {
4456 /* remember the address as a hint for next time */
4457 return (mm->free_area_cache = addr-len);
4458 }
4459@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4460 if (unlikely(mm->mmap_base < len))
4461 goto bottomup;
4462
4463- addr = mm->mmap_base-len;
4464- if (do_color_align)
4465- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4466+ addr = mm->mmap_base - len;
4467
4468 do {
4469+ if (do_color_align)
4470+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4471 /*
4472 * Lookup failure means no vma is above this address,
4473 * else if new region fits below vma->vm_start,
4474 * return with success:
4475 */
4476 vma = find_vma(mm, addr);
4477- if (likely(!vma || addr+len <= vma->vm_start)) {
4478+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4479 /* remember the address as a hint for next time */
4480 return (mm->free_area_cache = addr);
4481 }
4482@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4483 mm->cached_hole_size = vma->vm_start - addr;
4484
4485 /* try just below the current vma->vm_start */
4486- addr = vma->vm_start-len;
4487- if (do_color_align)
4488- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4489- } while (likely(len < vma->vm_start));
4490+ addr = skip_heap_stack_gap(vma, len);
4491+ } while (!IS_ERR_VALUE(addr));
4492
4493 bottomup:
4494 /*
4495@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4496 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4497 sysctl_legacy_va_layout) {
4498 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4499+
4500+#ifdef CONFIG_PAX_RANDMMAP
4501+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4502+ mm->mmap_base += mm->delta_mmap;
4503+#endif
4504+
4505 mm->get_unmapped_area = arch_get_unmapped_area;
4506 mm->unmap_area = arch_unmap_area;
4507 } else {
4508@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4509 gap = (task_size / 6 * 5);
4510
4511 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4512+
4513+#ifdef CONFIG_PAX_RANDMMAP
4514+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4515+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4516+#endif
4517+
4518 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4519 mm->unmap_area = arch_unmap_area_topdown;
4520 }
4521diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_32.c linux-2.6.32.42/arch/sparc/kernel/traps_32.c
4522--- linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4523+++ linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4524@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4525 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4526 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4527
4528+extern void gr_handle_kernel_exploit(void);
4529+
4530 void die_if_kernel(char *str, struct pt_regs *regs)
4531 {
4532 static int die_counter;
4533@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4534 count++ < 30 &&
4535 (((unsigned long) rw) >= PAGE_OFFSET) &&
4536 !(((unsigned long) rw) & 0x7)) {
4537- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4538+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4539 (void *) rw->ins[7]);
4540 rw = (struct reg_window32 *)rw->ins[6];
4541 }
4542 }
4543 printk("Instruction DUMP:");
4544 instruction_dump ((unsigned long *) regs->pc);
4545- if(regs->psr & PSR_PS)
4546+ if(regs->psr & PSR_PS) {
4547+ gr_handle_kernel_exploit();
4548 do_exit(SIGKILL);
4549+ }
4550 do_exit(SIGSEGV);
4551 }
4552
4553diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_64.c linux-2.6.32.42/arch/sparc/kernel/traps_64.c
4554--- linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4555+++ linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4556@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4557 i + 1,
4558 p->trapstack[i].tstate, p->trapstack[i].tpc,
4559 p->trapstack[i].tnpc, p->trapstack[i].tt);
4560- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4561+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4562 }
4563 }
4564
4565@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4566
4567 lvl -= 0x100;
4568 if (regs->tstate & TSTATE_PRIV) {
4569+
4570+#ifdef CONFIG_PAX_REFCOUNT
4571+ if (lvl == 6)
4572+ pax_report_refcount_overflow(regs);
4573+#endif
4574+
4575 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4576 die_if_kernel(buffer, regs);
4577 }
4578@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4579 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4580 {
4581 char buffer[32];
4582-
4583+
4584 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4585 0, lvl, SIGTRAP) == NOTIFY_STOP)
4586 return;
4587
4588+#ifdef CONFIG_PAX_REFCOUNT
4589+ if (lvl == 6)
4590+ pax_report_refcount_overflow(regs);
4591+#endif
4592+
4593 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4594
4595 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4596@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4597 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4598 printk("%s" "ERROR(%d): ",
4599 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4600- printk("TPC<%pS>\n", (void *) regs->tpc);
4601+ printk("TPC<%pA>\n", (void *) regs->tpc);
4602 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4603 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4604 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4605@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4606 smp_processor_id(),
4607 (type & 0x1) ? 'I' : 'D',
4608 regs->tpc);
4609- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4610+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4611 panic("Irrecoverable Cheetah+ parity error.");
4612 }
4613
4614@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4615 smp_processor_id(),
4616 (type & 0x1) ? 'I' : 'D',
4617 regs->tpc);
4618- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4619+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4620 }
4621
4622 struct sun4v_error_entry {
4623@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4624
4625 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4626 regs->tpc, tl);
4627- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4628+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4629 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4630- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4631+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4632 (void *) regs->u_regs[UREG_I7]);
4633 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4634 "pte[%lx] error[%lx]\n",
4635@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4636
4637 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4638 regs->tpc, tl);
4639- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4640+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4641 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4642- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4643+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4644 (void *) regs->u_regs[UREG_I7]);
4645 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4646 "pte[%lx] error[%lx]\n",
4647@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4648 fp = (unsigned long)sf->fp + STACK_BIAS;
4649 }
4650
4651- printk(" [%016lx] %pS\n", pc, (void *) pc);
4652+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4653 } while (++count < 16);
4654 }
4655
4656@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4657 return (struct reg_window *) (fp + STACK_BIAS);
4658 }
4659
4660+extern void gr_handle_kernel_exploit(void);
4661+
4662 void die_if_kernel(char *str, struct pt_regs *regs)
4663 {
4664 static int die_counter;
4665@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4666 while (rw &&
4667 count++ < 30&&
4668 is_kernel_stack(current, rw)) {
4669- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4670+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4671 (void *) rw->ins[7]);
4672
4673 rw = kernel_stack_up(rw);
4674@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4675 }
4676 user_instruction_dump ((unsigned int __user *) regs->tpc);
4677 }
4678- if (regs->tstate & TSTATE_PRIV)
4679+ if (regs->tstate & TSTATE_PRIV) {
4680+ gr_handle_kernel_exploit();
4681 do_exit(SIGKILL);
4682+ }
4683+
4684 do_exit(SIGSEGV);
4685 }
4686 EXPORT_SYMBOL(die_if_kernel);
4687diff -urNp linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c
4688--- linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4689+++ linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4690@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4691 if (count < 5) {
4692 last_time = jiffies;
4693 count++;
4694- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4695+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4696 regs->tpc, (void *) regs->tpc);
4697 }
4698 }
4699diff -urNp linux-2.6.32.42/arch/sparc/lib/atomic_64.S linux-2.6.32.42/arch/sparc/lib/atomic_64.S
4700--- linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4701+++ linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4702@@ -18,7 +18,12 @@
4703 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4704 BACKOFF_SETUP(%o2)
4705 1: lduw [%o1], %g1
4706- add %g1, %o0, %g7
4707+ addcc %g1, %o0, %g7
4708+
4709+#ifdef CONFIG_PAX_REFCOUNT
4710+ tvs %icc, 6
4711+#endif
4712+
4713 cas [%o1], %g1, %g7
4714 cmp %g1, %g7
4715 bne,pn %icc, 2f
4716@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4717 2: BACKOFF_SPIN(%o2, %o3, 1b)
4718 .size atomic_add, .-atomic_add
4719
4720+ .globl atomic_add_unchecked
4721+ .type atomic_add_unchecked,#function
4722+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4723+ BACKOFF_SETUP(%o2)
4724+1: lduw [%o1], %g1
4725+ add %g1, %o0, %g7
4726+ cas [%o1], %g1, %g7
4727+ cmp %g1, %g7
4728+ bne,pn %icc, 2f
4729+ nop
4730+ retl
4731+ nop
4732+2: BACKOFF_SPIN(%o2, %o3, 1b)
4733+ .size atomic_add_unchecked, .-atomic_add_unchecked
4734+
4735 .globl atomic_sub
4736 .type atomic_sub,#function
4737 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4738 BACKOFF_SETUP(%o2)
4739 1: lduw [%o1], %g1
4740- sub %g1, %o0, %g7
4741+ subcc %g1, %o0, %g7
4742+
4743+#ifdef CONFIG_PAX_REFCOUNT
4744+ tvs %icc, 6
4745+#endif
4746+
4747 cas [%o1], %g1, %g7
4748 cmp %g1, %g7
4749 bne,pn %icc, 2f
4750@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4751 2: BACKOFF_SPIN(%o2, %o3, 1b)
4752 .size atomic_sub, .-atomic_sub
4753
4754+ .globl atomic_sub_unchecked
4755+ .type atomic_sub_unchecked,#function
4756+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4757+ BACKOFF_SETUP(%o2)
4758+1: lduw [%o1], %g1
4759+ sub %g1, %o0, %g7
4760+ cas [%o1], %g1, %g7
4761+ cmp %g1, %g7
4762+ bne,pn %icc, 2f
4763+ nop
4764+ retl
4765+ nop
4766+2: BACKOFF_SPIN(%o2, %o3, 1b)
4767+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4768+
4769 .globl atomic_add_ret
4770 .type atomic_add_ret,#function
4771 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4772 BACKOFF_SETUP(%o2)
4773 1: lduw [%o1], %g1
4774- add %g1, %o0, %g7
4775+ addcc %g1, %o0, %g7
4776+
4777+#ifdef CONFIG_PAX_REFCOUNT
4778+ tvs %icc, 6
4779+#endif
4780+
4781 cas [%o1], %g1, %g7
4782 cmp %g1, %g7
4783 bne,pn %icc, 2f
4784@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4785 2: BACKOFF_SPIN(%o2, %o3, 1b)
4786 .size atomic_add_ret, .-atomic_add_ret
4787
4788+ .globl atomic_add_ret_unchecked
4789+ .type atomic_add_ret_unchecked,#function
4790+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4791+ BACKOFF_SETUP(%o2)
4792+1: lduw [%o1], %g1
4793+ addcc %g1, %o0, %g7
4794+ cas [%o1], %g1, %g7
4795+ cmp %g1, %g7
4796+ bne,pn %icc, 2f
4797+ add %g7, %o0, %g7
4798+ sra %g7, 0, %o0
4799+ retl
4800+ nop
4801+2: BACKOFF_SPIN(%o2, %o3, 1b)
4802+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4803+
4804 .globl atomic_sub_ret
4805 .type atomic_sub_ret,#function
4806 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4807 BACKOFF_SETUP(%o2)
4808 1: lduw [%o1], %g1
4809- sub %g1, %o0, %g7
4810+ subcc %g1, %o0, %g7
4811+
4812+#ifdef CONFIG_PAX_REFCOUNT
4813+ tvs %icc, 6
4814+#endif
4815+
4816 cas [%o1], %g1, %g7
4817 cmp %g1, %g7
4818 bne,pn %icc, 2f
4819@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4820 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4821 BACKOFF_SETUP(%o2)
4822 1: ldx [%o1], %g1
4823- add %g1, %o0, %g7
4824+ addcc %g1, %o0, %g7
4825+
4826+#ifdef CONFIG_PAX_REFCOUNT
4827+ tvs %xcc, 6
4828+#endif
4829+
4830 casx [%o1], %g1, %g7
4831 cmp %g1, %g7
4832 bne,pn %xcc, 2f
4833@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4834 2: BACKOFF_SPIN(%o2, %o3, 1b)
4835 .size atomic64_add, .-atomic64_add
4836
4837+ .globl atomic64_add_unchecked
4838+ .type atomic64_add_unchecked,#function
4839+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4840+ BACKOFF_SETUP(%o2)
4841+1: ldx [%o1], %g1
4842+ addcc %g1, %o0, %g7
4843+ casx [%o1], %g1, %g7
4844+ cmp %g1, %g7
4845+ bne,pn %xcc, 2f
4846+ nop
4847+ retl
4848+ nop
4849+2: BACKOFF_SPIN(%o2, %o3, 1b)
4850+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4851+
4852 .globl atomic64_sub
4853 .type atomic64_sub,#function
4854 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4855 BACKOFF_SETUP(%o2)
4856 1: ldx [%o1], %g1
4857- sub %g1, %o0, %g7
4858+ subcc %g1, %o0, %g7
4859+
4860+#ifdef CONFIG_PAX_REFCOUNT
4861+ tvs %xcc, 6
4862+#endif
4863+
4864 casx [%o1], %g1, %g7
4865 cmp %g1, %g7
4866 bne,pn %xcc, 2f
4867@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4868 2: BACKOFF_SPIN(%o2, %o3, 1b)
4869 .size atomic64_sub, .-atomic64_sub
4870
4871+ .globl atomic64_sub_unchecked
4872+ .type atomic64_sub_unchecked,#function
4873+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4874+ BACKOFF_SETUP(%o2)
4875+1: ldx [%o1], %g1
4876+ subcc %g1, %o0, %g7
4877+ casx [%o1], %g1, %g7
4878+ cmp %g1, %g7
4879+ bne,pn %xcc, 2f
4880+ nop
4881+ retl
4882+ nop
4883+2: BACKOFF_SPIN(%o2, %o3, 1b)
4884+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4885+
4886 .globl atomic64_add_ret
4887 .type atomic64_add_ret,#function
4888 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4889 BACKOFF_SETUP(%o2)
4890 1: ldx [%o1], %g1
4891- add %g1, %o0, %g7
4892+ addcc %g1, %o0, %g7
4893+
4894+#ifdef CONFIG_PAX_REFCOUNT
4895+ tvs %xcc, 6
4896+#endif
4897+
4898 casx [%o1], %g1, %g7
4899 cmp %g1, %g7
4900 bne,pn %xcc, 2f
4901@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4902 2: BACKOFF_SPIN(%o2, %o3, 1b)
4903 .size atomic64_add_ret, .-atomic64_add_ret
4904
4905+ .globl atomic64_add_ret_unchecked
4906+ .type atomic64_add_ret_unchecked,#function
4907+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4908+ BACKOFF_SETUP(%o2)
4909+1: ldx [%o1], %g1
4910+ addcc %g1, %o0, %g7
4911+ casx [%o1], %g1, %g7
4912+ cmp %g1, %g7
4913+ bne,pn %xcc, 2f
4914+ add %g7, %o0, %g7
4915+ mov %g7, %o0
4916+ retl
4917+ nop
4918+2: BACKOFF_SPIN(%o2, %o3, 1b)
4919+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4920+
4921 .globl atomic64_sub_ret
4922 .type atomic64_sub_ret,#function
4923 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4924 BACKOFF_SETUP(%o2)
4925 1: ldx [%o1], %g1
4926- sub %g1, %o0, %g7
4927+ subcc %g1, %o0, %g7
4928+
4929+#ifdef CONFIG_PAX_REFCOUNT
4930+ tvs %xcc, 6
4931+#endif
4932+
4933 casx [%o1], %g1, %g7
4934 cmp %g1, %g7
4935 bne,pn %xcc, 2f
4936diff -urNp linux-2.6.32.42/arch/sparc/lib/ksyms.c linux-2.6.32.42/arch/sparc/lib/ksyms.c
4937--- linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4938+++ linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4939@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4940
4941 /* Atomic counter implementation. */
4942 EXPORT_SYMBOL(atomic_add);
4943+EXPORT_SYMBOL(atomic_add_unchecked);
4944 EXPORT_SYMBOL(atomic_add_ret);
4945 EXPORT_SYMBOL(atomic_sub);
4946+EXPORT_SYMBOL(atomic_sub_unchecked);
4947 EXPORT_SYMBOL(atomic_sub_ret);
4948 EXPORT_SYMBOL(atomic64_add);
4949+EXPORT_SYMBOL(atomic64_add_unchecked);
4950 EXPORT_SYMBOL(atomic64_add_ret);
4951+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4952 EXPORT_SYMBOL(atomic64_sub);
4953+EXPORT_SYMBOL(atomic64_sub_unchecked);
4954 EXPORT_SYMBOL(atomic64_sub_ret);
4955
4956 /* Atomic bit operations. */
4957diff -urNp linux-2.6.32.42/arch/sparc/lib/Makefile linux-2.6.32.42/arch/sparc/lib/Makefile
4958--- linux-2.6.32.42/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4959+++ linux-2.6.32.42/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4960@@ -2,7 +2,7 @@
4961 #
4962
4963 asflags-y := -ansi -DST_DIV0=0x02
4964-ccflags-y := -Werror
4965+#ccflags-y := -Werror
4966
4967 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4968 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4969diff -urNp linux-2.6.32.42/arch/sparc/lib/rwsem_64.S linux-2.6.32.42/arch/sparc/lib/rwsem_64.S
4970--- linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4971+++ linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4972@@ -11,7 +11,12 @@
4973 .globl __down_read
4974 __down_read:
4975 1: lduw [%o0], %g1
4976- add %g1, 1, %g7
4977+ addcc %g1, 1, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %icc, 6
4981+#endif
4982+
4983 cas [%o0], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %icc, 1b
4986@@ -33,7 +38,12 @@ __down_read:
4987 .globl __down_read_trylock
4988 __down_read_trylock:
4989 1: lduw [%o0], %g1
4990- add %g1, 1, %g7
4991+ addcc %g1, 1, %g7
4992+
4993+#ifdef CONFIG_PAX_REFCOUNT
4994+ tvs %icc, 6
4995+#endif
4996+
4997 cmp %g7, 0
4998 bl,pn %icc, 2f
4999 mov 0, %o1
5000@@ -51,7 +61,12 @@ __down_write:
5001 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5002 1:
5003 lduw [%o0], %g3
5004- add %g3, %g1, %g7
5005+ addcc %g3, %g1, %g7
5006+
5007+#ifdef CONFIG_PAX_REFCOUNT
5008+ tvs %icc, 6
5009+#endif
5010+
5011 cas [%o0], %g3, %g7
5012 cmp %g3, %g7
5013 bne,pn %icc, 1b
5014@@ -77,7 +92,12 @@ __down_write_trylock:
5015 cmp %g3, 0
5016 bne,pn %icc, 2f
5017 mov 0, %o1
5018- add %g3, %g1, %g7
5019+ addcc %g3, %g1, %g7
5020+
5021+#ifdef CONFIG_PAX_REFCOUNT
5022+ tvs %icc, 6
5023+#endif
5024+
5025 cas [%o0], %g3, %g7
5026 cmp %g3, %g7
5027 bne,pn %icc, 1b
5028@@ -90,7 +110,12 @@ __down_write_trylock:
5029 __up_read:
5030 1:
5031 lduw [%o0], %g1
5032- sub %g1, 1, %g7
5033+ subcc %g1, 1, %g7
5034+
5035+#ifdef CONFIG_PAX_REFCOUNT
5036+ tvs %icc, 6
5037+#endif
5038+
5039 cas [%o0], %g1, %g7
5040 cmp %g1, %g7
5041 bne,pn %icc, 1b
5042@@ -118,7 +143,12 @@ __up_write:
5043 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5044 1:
5045 lduw [%o0], %g3
5046- sub %g3, %g1, %g7
5047+ subcc %g3, %g1, %g7
5048+
5049+#ifdef CONFIG_PAX_REFCOUNT
5050+ tvs %icc, 6
5051+#endif
5052+
5053 cas [%o0], %g3, %g7
5054 cmp %g3, %g7
5055 bne,pn %icc, 1b
5056@@ -143,7 +173,12 @@ __downgrade_write:
5057 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5058 1:
5059 lduw [%o0], %g3
5060- sub %g3, %g1, %g7
5061+ subcc %g3, %g1, %g7
5062+
5063+#ifdef CONFIG_PAX_REFCOUNT
5064+ tvs %icc, 6
5065+#endif
5066+
5067 cas [%o0], %g3, %g7
5068 cmp %g3, %g7
5069 bne,pn %icc, 1b
5070diff -urNp linux-2.6.32.42/arch/sparc/Makefile linux-2.6.32.42/arch/sparc/Makefile
5071--- linux-2.6.32.42/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5072+++ linux-2.6.32.42/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5073@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5074 # Export what is needed by arch/sparc/boot/Makefile
5075 export VMLINUX_INIT VMLINUX_MAIN
5076 VMLINUX_INIT := $(head-y) $(init-y)
5077-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5078+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5079 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5080 VMLINUX_MAIN += $(drivers-y) $(net-y)
5081
5082diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_32.c linux-2.6.32.42/arch/sparc/mm/fault_32.c
5083--- linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5084+++ linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5085@@ -21,6 +21,9 @@
5086 #include <linux/interrupt.h>
5087 #include <linux/module.h>
5088 #include <linux/kdebug.h>
5089+#include <linux/slab.h>
5090+#include <linux/pagemap.h>
5091+#include <linux/compiler.h>
5092
5093 #include <asm/system.h>
5094 #include <asm/page.h>
5095@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5096 return safe_compute_effective_address(regs, insn);
5097 }
5098
5099+#ifdef CONFIG_PAX_PAGEEXEC
5100+#ifdef CONFIG_PAX_DLRESOLVE
5101+static void pax_emuplt_close(struct vm_area_struct *vma)
5102+{
5103+ vma->vm_mm->call_dl_resolve = 0UL;
5104+}
5105+
5106+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5107+{
5108+ unsigned int *kaddr;
5109+
5110+ vmf->page = alloc_page(GFP_HIGHUSER);
5111+ if (!vmf->page)
5112+ return VM_FAULT_OOM;
5113+
5114+ kaddr = kmap(vmf->page);
5115+ memset(kaddr, 0, PAGE_SIZE);
5116+ kaddr[0] = 0x9DE3BFA8U; /* save */
5117+ flush_dcache_page(vmf->page);
5118+ kunmap(vmf->page);
5119+ return VM_FAULT_MAJOR;
5120+}
5121+
5122+static const struct vm_operations_struct pax_vm_ops = {
5123+ .close = pax_emuplt_close,
5124+ .fault = pax_emuplt_fault
5125+};
5126+
5127+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5128+{
5129+ int ret;
5130+
5131+ vma->vm_mm = current->mm;
5132+ vma->vm_start = addr;
5133+ vma->vm_end = addr + PAGE_SIZE;
5134+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5135+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5136+ vma->vm_ops = &pax_vm_ops;
5137+
5138+ ret = insert_vm_struct(current->mm, vma);
5139+ if (ret)
5140+ return ret;
5141+
5142+ ++current->mm->total_vm;
5143+ return 0;
5144+}
5145+#endif
5146+
5147+/*
5148+ * PaX: decide what to do with offenders (regs->pc = fault address)
5149+ *
5150+ * returns 1 when task should be killed
5151+ * 2 when patched PLT trampoline was detected
5152+ * 3 when unpatched PLT trampoline was detected
5153+ */
5154+static int pax_handle_fetch_fault(struct pt_regs *regs)
5155+{
5156+
5157+#ifdef CONFIG_PAX_EMUPLT
5158+ int err;
5159+
5160+ do { /* PaX: patched PLT emulation #1 */
5161+ unsigned int sethi1, sethi2, jmpl;
5162+
5163+ err = get_user(sethi1, (unsigned int *)regs->pc);
5164+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5165+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5166+
5167+ if (err)
5168+ break;
5169+
5170+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5171+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5172+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5173+ {
5174+ unsigned int addr;
5175+
5176+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5177+ addr = regs->u_regs[UREG_G1];
5178+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5179+ regs->pc = addr;
5180+ regs->npc = addr+4;
5181+ return 2;
5182+ }
5183+ } while (0);
5184+
5185+ { /* PaX: patched PLT emulation #2 */
5186+ unsigned int ba;
5187+
5188+ err = get_user(ba, (unsigned int *)regs->pc);
5189+
5190+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5191+ unsigned int addr;
5192+
5193+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5194+ regs->pc = addr;
5195+ regs->npc = addr+4;
5196+ return 2;
5197+ }
5198+ }
5199+
5200+ do { /* PaX: patched PLT emulation #3 */
5201+ unsigned int sethi, jmpl, nop;
5202+
5203+ err = get_user(sethi, (unsigned int *)regs->pc);
5204+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5205+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5206+
5207+ if (err)
5208+ break;
5209+
5210+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5211+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5212+ nop == 0x01000000U)
5213+ {
5214+ unsigned int addr;
5215+
5216+ addr = (sethi & 0x003FFFFFU) << 10;
5217+ regs->u_regs[UREG_G1] = addr;
5218+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5219+ regs->pc = addr;
5220+ regs->npc = addr+4;
5221+ return 2;
5222+ }
5223+ } while (0);
5224+
5225+ do { /* PaX: unpatched PLT emulation step 1 */
5226+ unsigned int sethi, ba, nop;
5227+
5228+ err = get_user(sethi, (unsigned int *)regs->pc);
5229+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5230+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5231+
5232+ if (err)
5233+ break;
5234+
5235+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5236+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5237+ nop == 0x01000000U)
5238+ {
5239+ unsigned int addr, save, call;
5240+
5241+ if ((ba & 0xFFC00000U) == 0x30800000U)
5242+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5243+ else
5244+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5245+
5246+ err = get_user(save, (unsigned int *)addr);
5247+ err |= get_user(call, (unsigned int *)(addr+4));
5248+ err |= get_user(nop, (unsigned int *)(addr+8));
5249+ if (err)
5250+ break;
5251+
5252+#ifdef CONFIG_PAX_DLRESOLVE
5253+ if (save == 0x9DE3BFA8U &&
5254+ (call & 0xC0000000U) == 0x40000000U &&
5255+ nop == 0x01000000U)
5256+ {
5257+ struct vm_area_struct *vma;
5258+ unsigned long call_dl_resolve;
5259+
5260+ down_read(&current->mm->mmap_sem);
5261+ call_dl_resolve = current->mm->call_dl_resolve;
5262+ up_read(&current->mm->mmap_sem);
5263+ if (likely(call_dl_resolve))
5264+ goto emulate;
5265+
5266+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5267+
5268+ down_write(&current->mm->mmap_sem);
5269+ if (current->mm->call_dl_resolve) {
5270+ call_dl_resolve = current->mm->call_dl_resolve;
5271+ up_write(&current->mm->mmap_sem);
5272+ if (vma)
5273+ kmem_cache_free(vm_area_cachep, vma);
5274+ goto emulate;
5275+ }
5276+
5277+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5278+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5279+ up_write(&current->mm->mmap_sem);
5280+ if (vma)
5281+ kmem_cache_free(vm_area_cachep, vma);
5282+ return 1;
5283+ }
5284+
5285+ if (pax_insert_vma(vma, call_dl_resolve)) {
5286+ up_write(&current->mm->mmap_sem);
5287+ kmem_cache_free(vm_area_cachep, vma);
5288+ return 1;
5289+ }
5290+
5291+ current->mm->call_dl_resolve = call_dl_resolve;
5292+ up_write(&current->mm->mmap_sem);
5293+
5294+emulate:
5295+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5296+ regs->pc = call_dl_resolve;
5297+ regs->npc = addr+4;
5298+ return 3;
5299+ }
5300+#endif
5301+
5302+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5303+ if ((save & 0xFFC00000U) == 0x05000000U &&
5304+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5305+ nop == 0x01000000U)
5306+ {
5307+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5308+ regs->u_regs[UREG_G2] = addr + 4;
5309+ addr = (save & 0x003FFFFFU) << 10;
5310+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5311+ regs->pc = addr;
5312+ regs->npc = addr+4;
5313+ return 3;
5314+ }
5315+ }
5316+ } while (0);
5317+
5318+ do { /* PaX: unpatched PLT emulation step 2 */
5319+ unsigned int save, call, nop;
5320+
5321+ err = get_user(save, (unsigned int *)(regs->pc-4));
5322+ err |= get_user(call, (unsigned int *)regs->pc);
5323+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5324+ if (err)
5325+ break;
5326+
5327+ if (save == 0x9DE3BFA8U &&
5328+ (call & 0xC0000000U) == 0x40000000U &&
5329+ nop == 0x01000000U)
5330+ {
5331+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5332+
5333+ regs->u_regs[UREG_RETPC] = regs->pc;
5334+ regs->pc = dl_resolve;
5335+ regs->npc = dl_resolve+4;
5336+ return 3;
5337+ }
5338+ } while (0);
5339+#endif
5340+
5341+ return 1;
5342+}
5343+
5344+void pax_report_insns(void *pc, void *sp)
5345+{
5346+ unsigned long i;
5347+
5348+ printk(KERN_ERR "PAX: bytes at PC: ");
5349+ for (i = 0; i < 8; i++) {
5350+ unsigned int c;
5351+ if (get_user(c, (unsigned int *)pc+i))
5352+ printk(KERN_CONT "???????? ");
5353+ else
5354+ printk(KERN_CONT "%08x ", c);
5355+ }
5356+ printk("\n");
5357+}
5358+#endif
5359+
5360 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5361 unsigned long address)
5362 {
5363@@ -231,6 +495,24 @@ good_area:
5364 if(!(vma->vm_flags & VM_WRITE))
5365 goto bad_area;
5366 } else {
5367+
5368+#ifdef CONFIG_PAX_PAGEEXEC
5369+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5370+ up_read(&mm->mmap_sem);
5371+ switch (pax_handle_fetch_fault(regs)) {
5372+
5373+#ifdef CONFIG_PAX_EMUPLT
5374+ case 2:
5375+ case 3:
5376+ return;
5377+#endif
5378+
5379+ }
5380+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5381+ do_group_exit(SIGKILL);
5382+ }
5383+#endif
5384+
5385 /* Allow reads even for write-only mappings */
5386 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5387 goto bad_area;
5388diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_64.c linux-2.6.32.42/arch/sparc/mm/fault_64.c
5389--- linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5390+++ linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5391@@ -20,6 +20,9 @@
5392 #include <linux/kprobes.h>
5393 #include <linux/kdebug.h>
5394 #include <linux/percpu.h>
5395+#include <linux/slab.h>
5396+#include <linux/pagemap.h>
5397+#include <linux/compiler.h>
5398
5399 #include <asm/page.h>
5400 #include <asm/pgtable.h>
5401@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5402 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5403 regs->tpc);
5404 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5405- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5406+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5407 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5408 dump_stack();
5409 unhandled_fault(regs->tpc, current, regs);
5410@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5411 show_regs(regs);
5412 }
5413
5414+#ifdef CONFIG_PAX_PAGEEXEC
5415+#ifdef CONFIG_PAX_DLRESOLVE
5416+static void pax_emuplt_close(struct vm_area_struct *vma)
5417+{
5418+ vma->vm_mm->call_dl_resolve = 0UL;
5419+}
5420+
5421+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5422+{
5423+ unsigned int *kaddr;
5424+
5425+ vmf->page = alloc_page(GFP_HIGHUSER);
5426+ if (!vmf->page)
5427+ return VM_FAULT_OOM;
5428+
5429+ kaddr = kmap(vmf->page);
5430+ memset(kaddr, 0, PAGE_SIZE);
5431+ kaddr[0] = 0x9DE3BFA8U; /* save */
5432+ flush_dcache_page(vmf->page);
5433+ kunmap(vmf->page);
5434+ return VM_FAULT_MAJOR;
5435+}
5436+
5437+static const struct vm_operations_struct pax_vm_ops = {
5438+ .close = pax_emuplt_close,
5439+ .fault = pax_emuplt_fault
5440+};
5441+
5442+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5443+{
5444+ int ret;
5445+
5446+ vma->vm_mm = current->mm;
5447+ vma->vm_start = addr;
5448+ vma->vm_end = addr + PAGE_SIZE;
5449+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5450+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5451+ vma->vm_ops = &pax_vm_ops;
5452+
5453+ ret = insert_vm_struct(current->mm, vma);
5454+ if (ret)
5455+ return ret;
5456+
5457+ ++current->mm->total_vm;
5458+ return 0;
5459+}
5460+#endif
5461+
5462+/*
5463+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5464+ *
5465+ * returns 1 when task should be killed
5466+ * 2 when patched PLT trampoline was detected
5467+ * 3 when unpatched PLT trampoline was detected
5468+ */
5469+static int pax_handle_fetch_fault(struct pt_regs *regs)
5470+{
5471+
5472+#ifdef CONFIG_PAX_EMUPLT
5473+ int err;
5474+
5475+ do { /* PaX: patched PLT emulation #1 */
5476+ unsigned int sethi1, sethi2, jmpl;
5477+
5478+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5479+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5480+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5481+
5482+ if (err)
5483+ break;
5484+
5485+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5486+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5487+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5488+ {
5489+ unsigned long addr;
5490+
5491+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5492+ addr = regs->u_regs[UREG_G1];
5493+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5494+
5495+ if (test_thread_flag(TIF_32BIT))
5496+ addr &= 0xFFFFFFFFUL;
5497+
5498+ regs->tpc = addr;
5499+ regs->tnpc = addr+4;
5500+ return 2;
5501+ }
5502+ } while (0);
5503+
5504+ { /* PaX: patched PLT emulation #2 */
5505+ unsigned int ba;
5506+
5507+ err = get_user(ba, (unsigned int *)regs->tpc);
5508+
5509+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5510+ unsigned long addr;
5511+
5512+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5513+
5514+ if (test_thread_flag(TIF_32BIT))
5515+ addr &= 0xFFFFFFFFUL;
5516+
5517+ regs->tpc = addr;
5518+ regs->tnpc = addr+4;
5519+ return 2;
5520+ }
5521+ }
5522+
5523+ do { /* PaX: patched PLT emulation #3 */
5524+ unsigned int sethi, jmpl, nop;
5525+
5526+ err = get_user(sethi, (unsigned int *)regs->tpc);
5527+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5528+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5529+
5530+ if (err)
5531+ break;
5532+
5533+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5534+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5535+ nop == 0x01000000U)
5536+ {
5537+ unsigned long addr;
5538+
5539+ addr = (sethi & 0x003FFFFFU) << 10;
5540+ regs->u_regs[UREG_G1] = addr;
5541+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5542+
5543+ if (test_thread_flag(TIF_32BIT))
5544+ addr &= 0xFFFFFFFFUL;
5545+
5546+ regs->tpc = addr;
5547+ regs->tnpc = addr+4;
5548+ return 2;
5549+ }
5550+ } while (0);
5551+
5552+ do { /* PaX: patched PLT emulation #4 */
5553+ unsigned int sethi, mov1, call, mov2;
5554+
5555+ err = get_user(sethi, (unsigned int *)regs->tpc);
5556+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5557+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5558+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5559+
5560+ if (err)
5561+ break;
5562+
5563+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5564+ mov1 == 0x8210000FU &&
5565+ (call & 0xC0000000U) == 0x40000000U &&
5566+ mov2 == 0x9E100001U)
5567+ {
5568+ unsigned long addr;
5569+
5570+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5571+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5572+
5573+ if (test_thread_flag(TIF_32BIT))
5574+ addr &= 0xFFFFFFFFUL;
5575+
5576+ regs->tpc = addr;
5577+ regs->tnpc = addr+4;
5578+ return 2;
5579+ }
5580+ } while (0);
5581+
5582+ do { /* PaX: patched PLT emulation #5 */
5583+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5584+
5585+ err = get_user(sethi, (unsigned int *)regs->tpc);
5586+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5587+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5588+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5589+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5590+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5591+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5592+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5593+
5594+ if (err)
5595+ break;
5596+
5597+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5598+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5599+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5600+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5601+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5602+ sllx == 0x83287020U &&
5603+ jmpl == 0x81C04005U &&
5604+ nop == 0x01000000U)
5605+ {
5606+ unsigned long addr;
5607+
5608+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5609+ regs->u_regs[UREG_G1] <<= 32;
5610+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5611+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5612+ regs->tpc = addr;
5613+ regs->tnpc = addr+4;
5614+ return 2;
5615+ }
5616+ } while (0);
5617+
5618+ do { /* PaX: patched PLT emulation #6 */
5619+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5620+
5621+ err = get_user(sethi, (unsigned int *)regs->tpc);
5622+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5623+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5624+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5625+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5626+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5627+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5628+
5629+ if (err)
5630+ break;
5631+
5632+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5633+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5634+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5635+ sllx == 0x83287020U &&
5636+ (or & 0xFFFFE000U) == 0x8A116000U &&
5637+ jmpl == 0x81C04005U &&
5638+ nop == 0x01000000U)
5639+ {
5640+ unsigned long addr;
5641+
5642+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5643+ regs->u_regs[UREG_G1] <<= 32;
5644+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5645+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5646+ regs->tpc = addr;
5647+ regs->tnpc = addr+4;
5648+ return 2;
5649+ }
5650+ } while (0);
5651+
5652+ do { /* PaX: unpatched PLT emulation step 1 */
5653+ unsigned int sethi, ba, nop;
5654+
5655+ err = get_user(sethi, (unsigned int *)regs->tpc);
5656+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5657+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5658+
5659+ if (err)
5660+ break;
5661+
5662+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5663+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5664+ nop == 0x01000000U)
5665+ {
5666+ unsigned long addr;
5667+ unsigned int save, call;
5668+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5669+
5670+ if ((ba & 0xFFC00000U) == 0x30800000U)
5671+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5672+ else
5673+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5674+
5675+ if (test_thread_flag(TIF_32BIT))
5676+ addr &= 0xFFFFFFFFUL;
5677+
5678+ err = get_user(save, (unsigned int *)addr);
5679+ err |= get_user(call, (unsigned int *)(addr+4));
5680+ err |= get_user(nop, (unsigned int *)(addr+8));
5681+ if (err)
5682+ break;
5683+
5684+#ifdef CONFIG_PAX_DLRESOLVE
5685+ if (save == 0x9DE3BFA8U &&
5686+ (call & 0xC0000000U) == 0x40000000U &&
5687+ nop == 0x01000000U)
5688+ {
5689+ struct vm_area_struct *vma;
5690+ unsigned long call_dl_resolve;
5691+
5692+ down_read(&current->mm->mmap_sem);
5693+ call_dl_resolve = current->mm->call_dl_resolve;
5694+ up_read(&current->mm->mmap_sem);
5695+ if (likely(call_dl_resolve))
5696+ goto emulate;
5697+
5698+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5699+
5700+ down_write(&current->mm->mmap_sem);
5701+ if (current->mm->call_dl_resolve) {
5702+ call_dl_resolve = current->mm->call_dl_resolve;
5703+ up_write(&current->mm->mmap_sem);
5704+ if (vma)
5705+ kmem_cache_free(vm_area_cachep, vma);
5706+ goto emulate;
5707+ }
5708+
5709+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5710+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5711+ up_write(&current->mm->mmap_sem);
5712+ if (vma)
5713+ kmem_cache_free(vm_area_cachep, vma);
5714+ return 1;
5715+ }
5716+
5717+ if (pax_insert_vma(vma, call_dl_resolve)) {
5718+ up_write(&current->mm->mmap_sem);
5719+ kmem_cache_free(vm_area_cachep, vma);
5720+ return 1;
5721+ }
5722+
5723+ current->mm->call_dl_resolve = call_dl_resolve;
5724+ up_write(&current->mm->mmap_sem);
5725+
5726+emulate:
5727+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5728+ regs->tpc = call_dl_resolve;
5729+ regs->tnpc = addr+4;
5730+ return 3;
5731+ }
5732+#endif
5733+
5734+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5735+ if ((save & 0xFFC00000U) == 0x05000000U &&
5736+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5737+ nop == 0x01000000U)
5738+ {
5739+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5740+ regs->u_regs[UREG_G2] = addr + 4;
5741+ addr = (save & 0x003FFFFFU) << 10;
5742+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5743+
5744+ if (test_thread_flag(TIF_32BIT))
5745+ addr &= 0xFFFFFFFFUL;
5746+
5747+ regs->tpc = addr;
5748+ regs->tnpc = addr+4;
5749+ return 3;
5750+ }
5751+
5752+ /* PaX: 64-bit PLT stub */
5753+ err = get_user(sethi1, (unsigned int *)addr);
5754+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5755+ err |= get_user(or1, (unsigned int *)(addr+8));
5756+ err |= get_user(or2, (unsigned int *)(addr+12));
5757+ err |= get_user(sllx, (unsigned int *)(addr+16));
5758+ err |= get_user(add, (unsigned int *)(addr+20));
5759+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5760+ err |= get_user(nop, (unsigned int *)(addr+28));
5761+ if (err)
5762+ break;
5763+
5764+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5765+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5766+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5767+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5768+ sllx == 0x89293020U &&
5769+ add == 0x8A010005U &&
5770+ jmpl == 0x89C14000U &&
5771+ nop == 0x01000000U)
5772+ {
5773+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5774+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5775+ regs->u_regs[UREG_G4] <<= 32;
5776+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5777+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5778+ regs->u_regs[UREG_G4] = addr + 24;
5779+ addr = regs->u_regs[UREG_G5];
5780+ regs->tpc = addr;
5781+ regs->tnpc = addr+4;
5782+ return 3;
5783+ }
5784+ }
5785+ } while (0);
5786+
5787+#ifdef CONFIG_PAX_DLRESOLVE
5788+ do { /* PaX: unpatched PLT emulation step 2 */
5789+ unsigned int save, call, nop;
5790+
5791+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5792+ err |= get_user(call, (unsigned int *)regs->tpc);
5793+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5794+ if (err)
5795+ break;
5796+
5797+ if (save == 0x9DE3BFA8U &&
5798+ (call & 0xC0000000U) == 0x40000000U &&
5799+ nop == 0x01000000U)
5800+ {
5801+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5802+
5803+ if (test_thread_flag(TIF_32BIT))
5804+ dl_resolve &= 0xFFFFFFFFUL;
5805+
5806+ regs->u_regs[UREG_RETPC] = regs->tpc;
5807+ regs->tpc = dl_resolve;
5808+ regs->tnpc = dl_resolve+4;
5809+ return 3;
5810+ }
5811+ } while (0);
5812+#endif
5813+
5814+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5815+ unsigned int sethi, ba, nop;
5816+
5817+ err = get_user(sethi, (unsigned int *)regs->tpc);
5818+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5819+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5820+
5821+ if (err)
5822+ break;
5823+
5824+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5825+ (ba & 0xFFF00000U) == 0x30600000U &&
5826+ nop == 0x01000000U)
5827+ {
5828+ unsigned long addr;
5829+
5830+ addr = (sethi & 0x003FFFFFU) << 10;
5831+ regs->u_regs[UREG_G1] = addr;
5832+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5833+
5834+ if (test_thread_flag(TIF_32BIT))
5835+ addr &= 0xFFFFFFFFUL;
5836+
5837+ regs->tpc = addr;
5838+ regs->tnpc = addr+4;
5839+ return 2;
5840+ }
5841+ } while (0);
5842+
5843+#endif
5844+
5845+ return 1;
5846+}
5847+
5848+void pax_report_insns(void *pc, void *sp)
5849+{
5850+ unsigned long i;
5851+
5852+ printk(KERN_ERR "PAX: bytes at PC: ");
5853+ for (i = 0; i < 8; i++) {
5854+ unsigned int c;
5855+ if (get_user(c, (unsigned int *)pc+i))
5856+ printk(KERN_CONT "???????? ");
5857+ else
5858+ printk(KERN_CONT "%08x ", c);
5859+ }
5860+ printk("\n");
5861+}
5862+#endif
5863+
5864 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5865 {
5866 struct mm_struct *mm = current->mm;
5867@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5868 if (!vma)
5869 goto bad_area;
5870
5871+#ifdef CONFIG_PAX_PAGEEXEC
5872+ /* PaX: detect ITLB misses on non-exec pages */
5873+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5874+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5875+ {
5876+ if (address != regs->tpc)
5877+ goto good_area;
5878+
5879+ up_read(&mm->mmap_sem);
5880+ switch (pax_handle_fetch_fault(regs)) {
5881+
5882+#ifdef CONFIG_PAX_EMUPLT
5883+ case 2:
5884+ case 3:
5885+ return;
5886+#endif
5887+
5888+ }
5889+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5890+ do_group_exit(SIGKILL);
5891+ }
5892+#endif
5893+
5894 /* Pure DTLB misses do not tell us whether the fault causing
5895 * load/store/atomic was a write or not, it only says that there
5896 * was no match. So in such a case we (carefully) read the
5897diff -urNp linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c
5898--- linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5899+++ linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5900@@ -69,7 +69,7 @@ full_search:
5901 }
5902 return -ENOMEM;
5903 }
5904- if (likely(!vma || addr + len <= vma->vm_start)) {
5905+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5906 /*
5907 * Remember the place where we stopped the search:
5908 */
5909@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5910 /* make sure it can fit in the remaining address space */
5911 if (likely(addr > len)) {
5912 vma = find_vma(mm, addr-len);
5913- if (!vma || addr <= vma->vm_start) {
5914+ if (check_heap_stack_gap(vma, addr - len, len)) {
5915 /* remember the address as a hint for next time */
5916 return (mm->free_area_cache = addr-len);
5917 }
5918@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5919 if (unlikely(mm->mmap_base < len))
5920 goto bottomup;
5921
5922- addr = (mm->mmap_base-len) & HPAGE_MASK;
5923+ addr = mm->mmap_base - len;
5924
5925 do {
5926+ addr &= HPAGE_MASK;
5927 /*
5928 * Lookup failure means no vma is above this address,
5929 * else if new region fits below vma->vm_start,
5930 * return with success:
5931 */
5932 vma = find_vma(mm, addr);
5933- if (likely(!vma || addr+len <= vma->vm_start)) {
5934+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5935 /* remember the address as a hint for next time */
5936 return (mm->free_area_cache = addr);
5937 }
5938@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5939 mm->cached_hole_size = vma->vm_start - addr;
5940
5941 /* try just below the current vma->vm_start */
5942- addr = (vma->vm_start-len) & HPAGE_MASK;
5943- } while (likely(len < vma->vm_start));
5944+ addr = skip_heap_stack_gap(vma, len);
5945+ } while (!IS_ERR_VALUE(addr));
5946
5947 bottomup:
5948 /*
5949@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5950 if (addr) {
5951 addr = ALIGN(addr, HPAGE_SIZE);
5952 vma = find_vma(mm, addr);
5953- if (task_size - len >= addr &&
5954- (!vma || addr + len <= vma->vm_start))
5955+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5956 return addr;
5957 }
5958 if (mm->get_unmapped_area == arch_get_unmapped_area)
5959diff -urNp linux-2.6.32.42/arch/sparc/mm/init_32.c linux-2.6.32.42/arch/sparc/mm/init_32.c
5960--- linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5961+++ linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5962@@ -317,6 +317,9 @@ extern void device_scan(void);
5963 pgprot_t PAGE_SHARED __read_mostly;
5964 EXPORT_SYMBOL(PAGE_SHARED);
5965
5966+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5967+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5968+
5969 void __init paging_init(void)
5970 {
5971 switch(sparc_cpu_model) {
5972@@ -345,17 +348,17 @@ void __init paging_init(void)
5973
5974 /* Initialize the protection map with non-constant, MMU dependent values. */
5975 protection_map[0] = PAGE_NONE;
5976- protection_map[1] = PAGE_READONLY;
5977- protection_map[2] = PAGE_COPY;
5978- protection_map[3] = PAGE_COPY;
5979+ protection_map[1] = PAGE_READONLY_NOEXEC;
5980+ protection_map[2] = PAGE_COPY_NOEXEC;
5981+ protection_map[3] = PAGE_COPY_NOEXEC;
5982 protection_map[4] = PAGE_READONLY;
5983 protection_map[5] = PAGE_READONLY;
5984 protection_map[6] = PAGE_COPY;
5985 protection_map[7] = PAGE_COPY;
5986 protection_map[8] = PAGE_NONE;
5987- protection_map[9] = PAGE_READONLY;
5988- protection_map[10] = PAGE_SHARED;
5989- protection_map[11] = PAGE_SHARED;
5990+ protection_map[9] = PAGE_READONLY_NOEXEC;
5991+ protection_map[10] = PAGE_SHARED_NOEXEC;
5992+ protection_map[11] = PAGE_SHARED_NOEXEC;
5993 protection_map[12] = PAGE_READONLY;
5994 protection_map[13] = PAGE_READONLY;
5995 protection_map[14] = PAGE_SHARED;
5996diff -urNp linux-2.6.32.42/arch/sparc/mm/Makefile linux-2.6.32.42/arch/sparc/mm/Makefile
5997--- linux-2.6.32.42/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5998+++ linux-2.6.32.42/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5999@@ -2,7 +2,7 @@
6000 #
6001
6002 asflags-y := -ansi
6003-ccflags-y := -Werror
6004+#ccflags-y := -Werror
6005
6006 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6007 obj-y += fault_$(BITS).o
6008diff -urNp linux-2.6.32.42/arch/sparc/mm/srmmu.c linux-2.6.32.42/arch/sparc/mm/srmmu.c
6009--- linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6010+++ linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6011@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6012 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6013 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6014 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6015+
6016+#ifdef CONFIG_PAX_PAGEEXEC
6017+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6018+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6019+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6020+#endif
6021+
6022 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6023 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6024
6025diff -urNp linux-2.6.32.42/arch/um/include/asm/kmap_types.h linux-2.6.32.42/arch/um/include/asm/kmap_types.h
6026--- linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6027+++ linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6028@@ -23,6 +23,7 @@ enum km_type {
6029 KM_IRQ1,
6030 KM_SOFTIRQ0,
6031 KM_SOFTIRQ1,
6032+ KM_CLEARPAGE,
6033 KM_TYPE_NR
6034 };
6035
6036diff -urNp linux-2.6.32.42/arch/um/include/asm/page.h linux-2.6.32.42/arch/um/include/asm/page.h
6037--- linux-2.6.32.42/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6038+++ linux-2.6.32.42/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6039@@ -14,6 +14,9 @@
6040 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6041 #define PAGE_MASK (~(PAGE_SIZE-1))
6042
6043+#define ktla_ktva(addr) (addr)
6044+#define ktva_ktla(addr) (addr)
6045+
6046 #ifndef __ASSEMBLY__
6047
6048 struct page;
6049diff -urNp linux-2.6.32.42/arch/um/kernel/process.c linux-2.6.32.42/arch/um/kernel/process.c
6050--- linux-2.6.32.42/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6051+++ linux-2.6.32.42/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6052@@ -393,22 +393,6 @@ int singlestepping(void * t)
6053 return 2;
6054 }
6055
6056-/*
6057- * Only x86 and x86_64 have an arch_align_stack().
6058- * All other arches have "#define arch_align_stack(x) (x)"
6059- * in their asm/system.h
6060- * As this is included in UML from asm-um/system-generic.h,
6061- * we can use it to behave as the subarch does.
6062- */
6063-#ifndef arch_align_stack
6064-unsigned long arch_align_stack(unsigned long sp)
6065-{
6066- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6067- sp -= get_random_int() % 8192;
6068- return sp & ~0xf;
6069-}
6070-#endif
6071-
6072 unsigned long get_wchan(struct task_struct *p)
6073 {
6074 unsigned long stack_page, sp, ip;
6075diff -urNp linux-2.6.32.42/arch/um/sys-i386/syscalls.c linux-2.6.32.42/arch/um/sys-i386/syscalls.c
6076--- linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6077+++ linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6078@@ -11,6 +11,21 @@
6079 #include "asm/uaccess.h"
6080 #include "asm/unistd.h"
6081
6082+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6083+{
6084+ unsigned long pax_task_size = TASK_SIZE;
6085+
6086+#ifdef CONFIG_PAX_SEGMEXEC
6087+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6088+ pax_task_size = SEGMEXEC_TASK_SIZE;
6089+#endif
6090+
6091+ if (len > pax_task_size || addr > pax_task_size - len)
6092+ return -EINVAL;
6093+
6094+ return 0;
6095+}
6096+
6097 /*
6098 * Perform the select(nd, in, out, ex, tv) and mmap() system
6099 * calls. Linux/i386 didn't use to be able to handle more than
6100diff -urNp linux-2.6.32.42/arch/x86/boot/bitops.h linux-2.6.32.42/arch/x86/boot/bitops.h
6101--- linux-2.6.32.42/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6102+++ linux-2.6.32.42/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6103@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6104 u8 v;
6105 const u32 *p = (const u32 *)addr;
6106
6107- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6108+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6109 return v;
6110 }
6111
6112@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6113
6114 static inline void set_bit(int nr, void *addr)
6115 {
6116- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6117+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6118 }
6119
6120 #endif /* BOOT_BITOPS_H */
6121diff -urNp linux-2.6.32.42/arch/x86/boot/boot.h linux-2.6.32.42/arch/x86/boot/boot.h
6122--- linux-2.6.32.42/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6123+++ linux-2.6.32.42/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6124@@ -82,7 +82,7 @@ static inline void io_delay(void)
6125 static inline u16 ds(void)
6126 {
6127 u16 seg;
6128- asm("movw %%ds,%0" : "=rm" (seg));
6129+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6130 return seg;
6131 }
6132
6133@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6134 static inline int memcmp(const void *s1, const void *s2, size_t len)
6135 {
6136 u8 diff;
6137- asm("repe; cmpsb; setnz %0"
6138+ asm volatile("repe; cmpsb; setnz %0"
6139 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6140 return diff;
6141 }
6142diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_32.S linux-2.6.32.42/arch/x86/boot/compressed/head_32.S
6143--- linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6144+++ linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6145@@ -76,7 +76,7 @@ ENTRY(startup_32)
6146 notl %eax
6147 andl %eax, %ebx
6148 #else
6149- movl $LOAD_PHYSICAL_ADDR, %ebx
6150+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6151 #endif
6152
6153 /* Target address to relocate to for decompression */
6154@@ -149,7 +149,7 @@ relocated:
6155 * and where it was actually loaded.
6156 */
6157 movl %ebp, %ebx
6158- subl $LOAD_PHYSICAL_ADDR, %ebx
6159+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6160 jz 2f /* Nothing to be done if loaded at compiled addr. */
6161 /*
6162 * Process relocations.
6163@@ -157,8 +157,7 @@ relocated:
6164
6165 1: subl $4, %edi
6166 movl (%edi), %ecx
6167- testl %ecx, %ecx
6168- jz 2f
6169+ jecxz 2f
6170 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6171 jmp 1b
6172 2:
6173diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_64.S linux-2.6.32.42/arch/x86/boot/compressed/head_64.S
6174--- linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6175+++ linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6176@@ -91,7 +91,7 @@ ENTRY(startup_32)
6177 notl %eax
6178 andl %eax, %ebx
6179 #else
6180- movl $LOAD_PHYSICAL_ADDR, %ebx
6181+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6182 #endif
6183
6184 /* Target address to relocate to for decompression */
6185@@ -183,7 +183,7 @@ no_longmode:
6186 hlt
6187 jmp 1b
6188
6189-#include "../../kernel/verify_cpu_64.S"
6190+#include "../../kernel/verify_cpu.S"
6191
6192 /*
6193 * Be careful here startup_64 needs to be at a predictable
6194@@ -234,7 +234,7 @@ ENTRY(startup_64)
6195 notq %rax
6196 andq %rax, %rbp
6197 #else
6198- movq $LOAD_PHYSICAL_ADDR, %rbp
6199+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6200 #endif
6201
6202 /* Target address to relocate to for decompression */
6203diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/misc.c linux-2.6.32.42/arch/x86/boot/compressed/misc.c
6204--- linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6205+++ linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6206@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6207 case PT_LOAD:
6208 #ifdef CONFIG_RELOCATABLE
6209 dest = output;
6210- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6211+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6212 #else
6213 dest = (void *)(phdr->p_paddr);
6214 #endif
6215@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6216 error("Destination address too large");
6217 #endif
6218 #ifndef CONFIG_RELOCATABLE
6219- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6220+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6221 error("Wrong destination address");
6222 #endif
6223
6224diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c
6225--- linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6226+++ linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6227@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6228
6229 offs = (olen > ilen) ? olen - ilen : 0;
6230 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6231- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6232+ offs += 64*1024; /* Add 64K bytes slack */
6233 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6234
6235 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6236diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/relocs.c linux-2.6.32.42/arch/x86/boot/compressed/relocs.c
6237--- linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6238+++ linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6239@@ -10,8 +10,11 @@
6240 #define USE_BSD
6241 #include <endian.h>
6242
6243+#include "../../../../include/linux/autoconf.h"
6244+
6245 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6246 static Elf32_Ehdr ehdr;
6247+static Elf32_Phdr *phdr;
6248 static unsigned long reloc_count, reloc_idx;
6249 static unsigned long *relocs;
6250
6251@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6252
6253 static int is_safe_abs_reloc(const char* sym_name)
6254 {
6255- int i;
6256+ unsigned int i;
6257
6258 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6259 if (!strcmp(sym_name, safe_abs_relocs[i]))
6260@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6261 }
6262 }
6263
6264+static void read_phdrs(FILE *fp)
6265+{
6266+ unsigned int i;
6267+
6268+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6269+ if (!phdr) {
6270+ die("Unable to allocate %d program headers\n",
6271+ ehdr.e_phnum);
6272+ }
6273+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6274+ die("Seek to %d failed: %s\n",
6275+ ehdr.e_phoff, strerror(errno));
6276+ }
6277+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6278+ die("Cannot read ELF program headers: %s\n",
6279+ strerror(errno));
6280+ }
6281+ for(i = 0; i < ehdr.e_phnum; i++) {
6282+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6283+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6284+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6285+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6286+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6287+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6288+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6289+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6290+ }
6291+
6292+}
6293+
6294 static void read_shdrs(FILE *fp)
6295 {
6296- int i;
6297+ unsigned int i;
6298 Elf32_Shdr shdr;
6299
6300 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6301@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6302
6303 static void read_strtabs(FILE *fp)
6304 {
6305- int i;
6306+ unsigned int i;
6307 for (i = 0; i < ehdr.e_shnum; i++) {
6308 struct section *sec = &secs[i];
6309 if (sec->shdr.sh_type != SHT_STRTAB) {
6310@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6311
6312 static void read_symtabs(FILE *fp)
6313 {
6314- int i,j;
6315+ unsigned int i,j;
6316 for (i = 0; i < ehdr.e_shnum; i++) {
6317 struct section *sec = &secs[i];
6318 if (sec->shdr.sh_type != SHT_SYMTAB) {
6319@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6320
6321 static void read_relocs(FILE *fp)
6322 {
6323- int i,j;
6324+ unsigned int i,j;
6325+ uint32_t base;
6326+
6327 for (i = 0; i < ehdr.e_shnum; i++) {
6328 struct section *sec = &secs[i];
6329 if (sec->shdr.sh_type != SHT_REL) {
6330@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6331 die("Cannot read symbol table: %s\n",
6332 strerror(errno));
6333 }
6334+ base = 0;
6335+ for (j = 0; j < ehdr.e_phnum; j++) {
6336+ if (phdr[j].p_type != PT_LOAD )
6337+ continue;
6338+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6339+ continue;
6340+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6341+ break;
6342+ }
6343 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6344 Elf32_Rel *rel = &sec->reltab[j];
6345- rel->r_offset = elf32_to_cpu(rel->r_offset);
6346+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6347 rel->r_info = elf32_to_cpu(rel->r_info);
6348 }
6349 }
6350@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6351
6352 static void print_absolute_symbols(void)
6353 {
6354- int i;
6355+ unsigned int i;
6356 printf("Absolute symbols\n");
6357 printf(" Num: Value Size Type Bind Visibility Name\n");
6358 for (i = 0; i < ehdr.e_shnum; i++) {
6359 struct section *sec = &secs[i];
6360 char *sym_strtab;
6361 Elf32_Sym *sh_symtab;
6362- int j;
6363+ unsigned int j;
6364
6365 if (sec->shdr.sh_type != SHT_SYMTAB) {
6366 continue;
6367@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6368
6369 static void print_absolute_relocs(void)
6370 {
6371- int i, printed = 0;
6372+ unsigned int i, printed = 0;
6373
6374 for (i = 0; i < ehdr.e_shnum; i++) {
6375 struct section *sec = &secs[i];
6376 struct section *sec_applies, *sec_symtab;
6377 char *sym_strtab;
6378 Elf32_Sym *sh_symtab;
6379- int j;
6380+ unsigned int j;
6381 if (sec->shdr.sh_type != SHT_REL) {
6382 continue;
6383 }
6384@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6385
6386 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6387 {
6388- int i;
6389+ unsigned int i;
6390 /* Walk through the relocations */
6391 for (i = 0; i < ehdr.e_shnum; i++) {
6392 char *sym_strtab;
6393 Elf32_Sym *sh_symtab;
6394 struct section *sec_applies, *sec_symtab;
6395- int j;
6396+ unsigned int j;
6397 struct section *sec = &secs[i];
6398
6399 if (sec->shdr.sh_type != SHT_REL) {
6400@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6401 if (sym->st_shndx == SHN_ABS) {
6402 continue;
6403 }
6404+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6405+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6406+ continue;
6407+
6408+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6409+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6410+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6411+ continue;
6412+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6413+ continue;
6414+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6415+ continue;
6416+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6417+ continue;
6418+#endif
6419 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6420 /*
6421 * NONE can be ignored and and PC relative
6422@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6423
6424 static void emit_relocs(int as_text)
6425 {
6426- int i;
6427+ unsigned int i;
6428 /* Count how many relocations I have and allocate space for them. */
6429 reloc_count = 0;
6430 walk_relocs(count_reloc);
6431@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6432 fname, strerror(errno));
6433 }
6434 read_ehdr(fp);
6435+ read_phdrs(fp);
6436 read_shdrs(fp);
6437 read_strtabs(fp);
6438 read_symtabs(fp);
6439diff -urNp linux-2.6.32.42/arch/x86/boot/cpucheck.c linux-2.6.32.42/arch/x86/boot/cpucheck.c
6440--- linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6441+++ linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6442@@ -74,7 +74,7 @@ static int has_fpu(void)
6443 u16 fcw = -1, fsw = -1;
6444 u32 cr0;
6445
6446- asm("movl %%cr0,%0" : "=r" (cr0));
6447+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6448 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6449 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6450 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6451@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6452 {
6453 u32 f0, f1;
6454
6455- asm("pushfl ; "
6456+ asm volatile("pushfl ; "
6457 "pushfl ; "
6458 "popl %0 ; "
6459 "movl %0,%1 ; "
6460@@ -115,7 +115,7 @@ static void get_flags(void)
6461 set_bit(X86_FEATURE_FPU, cpu.flags);
6462
6463 if (has_eflag(X86_EFLAGS_ID)) {
6464- asm("cpuid"
6465+ asm volatile("cpuid"
6466 : "=a" (max_intel_level),
6467 "=b" (cpu_vendor[0]),
6468 "=d" (cpu_vendor[1]),
6469@@ -124,7 +124,7 @@ static void get_flags(void)
6470
6471 if (max_intel_level >= 0x00000001 &&
6472 max_intel_level <= 0x0000ffff) {
6473- asm("cpuid"
6474+ asm volatile("cpuid"
6475 : "=a" (tfms),
6476 "=c" (cpu.flags[4]),
6477 "=d" (cpu.flags[0])
6478@@ -136,7 +136,7 @@ static void get_flags(void)
6479 cpu.model += ((tfms >> 16) & 0xf) << 4;
6480 }
6481
6482- asm("cpuid"
6483+ asm volatile("cpuid"
6484 : "=a" (max_amd_level)
6485 : "a" (0x80000000)
6486 : "ebx", "ecx", "edx");
6487@@ -144,7 +144,7 @@ static void get_flags(void)
6488 if (max_amd_level >= 0x80000001 &&
6489 max_amd_level <= 0x8000ffff) {
6490 u32 eax = 0x80000001;
6491- asm("cpuid"
6492+ asm volatile("cpuid"
6493 : "+a" (eax),
6494 "=c" (cpu.flags[6]),
6495 "=d" (cpu.flags[1])
6496@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6497 u32 ecx = MSR_K7_HWCR;
6498 u32 eax, edx;
6499
6500- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6501+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6502 eax &= ~(1 << 15);
6503- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6504+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6505
6506 get_flags(); /* Make sure it really did something */
6507 err = check_flags();
6508@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6509 u32 ecx = MSR_VIA_FCR;
6510 u32 eax, edx;
6511
6512- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6513+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6514 eax |= (1<<1)|(1<<7);
6515- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6516+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6517
6518 set_bit(X86_FEATURE_CX8, cpu.flags);
6519 err = check_flags();
6520@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6521 u32 eax, edx;
6522 u32 level = 1;
6523
6524- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6525- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6526- asm("cpuid"
6527+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6528+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6529+ asm volatile("cpuid"
6530 : "+a" (level), "=d" (cpu.flags[0])
6531 : : "ecx", "ebx");
6532- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6533+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6534
6535 err = check_flags();
6536 }
6537diff -urNp linux-2.6.32.42/arch/x86/boot/header.S linux-2.6.32.42/arch/x86/boot/header.S
6538--- linux-2.6.32.42/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6539+++ linux-2.6.32.42/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6540@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6541 # single linked list of
6542 # struct setup_data
6543
6544-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6545+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6546
6547 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6548 #define VO_INIT_SIZE (VO__end - VO__text)
6549diff -urNp linux-2.6.32.42/arch/x86/boot/memory.c linux-2.6.32.42/arch/x86/boot/memory.c
6550--- linux-2.6.32.42/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6551+++ linux-2.6.32.42/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6552@@ -19,7 +19,7 @@
6553
6554 static int detect_memory_e820(void)
6555 {
6556- int count = 0;
6557+ unsigned int count = 0;
6558 struct biosregs ireg, oreg;
6559 struct e820entry *desc = boot_params.e820_map;
6560 static struct e820entry buf; /* static so it is zeroed */
6561diff -urNp linux-2.6.32.42/arch/x86/boot/video.c linux-2.6.32.42/arch/x86/boot/video.c
6562--- linux-2.6.32.42/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6563+++ linux-2.6.32.42/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6564@@ -90,7 +90,7 @@ static void store_mode_params(void)
6565 static unsigned int get_entry(void)
6566 {
6567 char entry_buf[4];
6568- int i, len = 0;
6569+ unsigned int i, len = 0;
6570 int key;
6571 unsigned int v;
6572
6573diff -urNp linux-2.6.32.42/arch/x86/boot/video-vesa.c linux-2.6.32.42/arch/x86/boot/video-vesa.c
6574--- linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6575+++ linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6576@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6577
6578 boot_params.screen_info.vesapm_seg = oreg.es;
6579 boot_params.screen_info.vesapm_off = oreg.di;
6580+ boot_params.screen_info.vesapm_size = oreg.cx;
6581 }
6582
6583 /*
6584diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_aout.c linux-2.6.32.42/arch/x86/ia32/ia32_aout.c
6585--- linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6586+++ linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6587@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6588 unsigned long dump_start, dump_size;
6589 struct user32 dump;
6590
6591+ memset(&dump, 0, sizeof(dump));
6592+
6593 fs = get_fs();
6594 set_fs(KERNEL_DS);
6595 has_dumped = 1;
6596@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6597 dump_size = dump.u_ssize << PAGE_SHIFT;
6598 DUMP_WRITE(dump_start, dump_size);
6599 }
6600- /*
6601- * Finally dump the task struct. Not be used by gdb, but
6602- * could be useful
6603- */
6604- set_fs(KERNEL_DS);
6605- DUMP_WRITE(current, sizeof(*current));
6606 end_coredump:
6607 set_fs(fs);
6608 return has_dumped;
6609diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32entry.S linux-2.6.32.42/arch/x86/ia32/ia32entry.S
6610--- linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6611+++ linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6612@@ -13,6 +13,7 @@
6613 #include <asm/thread_info.h>
6614 #include <asm/segment.h>
6615 #include <asm/irqflags.h>
6616+#include <asm/pgtable.h>
6617 #include <linux/linkage.h>
6618
6619 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6620@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6621 ENDPROC(native_irq_enable_sysexit)
6622 #endif
6623
6624+ .macro pax_enter_kernel_user
6625+#ifdef CONFIG_PAX_MEMORY_UDEREF
6626+ call pax_enter_kernel_user
6627+#endif
6628+ .endm
6629+
6630+ .macro pax_exit_kernel_user
6631+#ifdef CONFIG_PAX_MEMORY_UDEREF
6632+ call pax_exit_kernel_user
6633+#endif
6634+#ifdef CONFIG_PAX_RANDKSTACK
6635+ pushq %rax
6636+ call pax_randomize_kstack
6637+ popq %rax
6638+#endif
6639+ pax_erase_kstack
6640+ .endm
6641+
6642+.macro pax_erase_kstack
6643+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6644+ call pax_erase_kstack
6645+#endif
6646+.endm
6647+
6648 /*
6649 * 32bit SYSENTER instruction entry.
6650 *
6651@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6652 CFI_REGISTER rsp,rbp
6653 SWAPGS_UNSAFE_STACK
6654 movq PER_CPU_VAR(kernel_stack), %rsp
6655- addq $(KERNEL_STACK_OFFSET),%rsp
6656+ pax_enter_kernel_user
6657 /*
6658 * No need to follow this irqs on/off section: the syscall
6659 * disabled irqs, here we enable it straight after entry:
6660@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6661 pushfq
6662 CFI_ADJUST_CFA_OFFSET 8
6663 /*CFI_REL_OFFSET rflags,0*/
6664- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6665+ GET_THREAD_INFO(%r10)
6666+ movl TI_sysenter_return(%r10), %r10d
6667 CFI_REGISTER rip,r10
6668 pushq $__USER32_CS
6669 CFI_ADJUST_CFA_OFFSET 8
6670@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6671 SAVE_ARGS 0,0,1
6672 /* no need to do an access_ok check here because rbp has been
6673 32bit zero extended */
6674+
6675+#ifdef CONFIG_PAX_MEMORY_UDEREF
6676+ mov $PAX_USER_SHADOW_BASE,%r10
6677+ add %r10,%rbp
6678+#endif
6679+
6680 1: movl (%rbp),%ebp
6681 .section __ex_table,"a"
6682 .quad 1b,ia32_badarg
6683@@ -172,6 +204,7 @@ sysenter_dispatch:
6684 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6685 jnz sysexit_audit
6686 sysexit_from_sys_call:
6687+ pax_exit_kernel_user
6688 andl $~TS_COMPAT,TI_status(%r10)
6689 /* clear IF, that popfq doesn't enable interrupts early */
6690 andl $~0x200,EFLAGS-R11(%rsp)
6691@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6692 movl %eax,%esi /* 2nd arg: syscall number */
6693 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6694 call audit_syscall_entry
6695+
6696+ pax_erase_kstack
6697+
6698 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6699 cmpq $(IA32_NR_syscalls-1),%rax
6700 ja ia32_badsys
6701@@ -252,6 +288,9 @@ sysenter_tracesys:
6702 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6703 movq %rsp,%rdi /* &pt_regs -> arg1 */
6704 call syscall_trace_enter
6705+
6706+ pax_erase_kstack
6707+
6708 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6709 RESTORE_REST
6710 cmpq $(IA32_NR_syscalls-1),%rax
6711@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6712 ENTRY(ia32_cstar_target)
6713 CFI_STARTPROC32 simple
6714 CFI_SIGNAL_FRAME
6715- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6716+ CFI_DEF_CFA rsp,0
6717 CFI_REGISTER rip,rcx
6718 /*CFI_REGISTER rflags,r11*/
6719 SWAPGS_UNSAFE_STACK
6720 movl %esp,%r8d
6721 CFI_REGISTER rsp,r8
6722 movq PER_CPU_VAR(kernel_stack),%rsp
6723+
6724+#ifdef CONFIG_PAX_MEMORY_UDEREF
6725+ pax_enter_kernel_user
6726+#endif
6727+
6728 /*
6729 * No need to follow this irqs on/off section: the syscall
6730 * disabled irqs and here we enable it straight after entry:
6731 */
6732 ENABLE_INTERRUPTS(CLBR_NONE)
6733- SAVE_ARGS 8,1,1
6734+ SAVE_ARGS 8*6,1,1
6735 movl %eax,%eax /* zero extension */
6736 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6737 movq %rcx,RIP-ARGOFFSET(%rsp)
6738@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6739 /* no need to do an access_ok check here because r8 has been
6740 32bit zero extended */
6741 /* hardware stack frame is complete now */
6742+
6743+#ifdef CONFIG_PAX_MEMORY_UDEREF
6744+ mov $PAX_USER_SHADOW_BASE,%r10
6745+ add %r10,%r8
6746+#endif
6747+
6748 1: movl (%r8),%r9d
6749 .section __ex_table,"a"
6750 .quad 1b,ia32_badarg
6751@@ -333,6 +383,7 @@ cstar_dispatch:
6752 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6753 jnz sysretl_audit
6754 sysretl_from_sys_call:
6755+ pax_exit_kernel_user
6756 andl $~TS_COMPAT,TI_status(%r10)
6757 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6758 movl RIP-ARGOFFSET(%rsp),%ecx
6759@@ -370,6 +421,9 @@ cstar_tracesys:
6760 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6761 movq %rsp,%rdi /* &pt_regs -> arg1 */
6762 call syscall_trace_enter
6763+
6764+ pax_erase_kstack
6765+
6766 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6767 RESTORE_REST
6768 xchgl %ebp,%r9d
6769@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6770 CFI_REL_OFFSET rip,RIP-RIP
6771 PARAVIRT_ADJUST_EXCEPTION_FRAME
6772 SWAPGS
6773+ pax_enter_kernel_user
6774 /*
6775 * No need to follow this irqs on/off section: the syscall
6776 * disabled irqs and here we enable it straight after entry:
6777@@ -448,6 +503,9 @@ ia32_tracesys:
6778 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6779 movq %rsp,%rdi /* &pt_regs -> arg1 */
6780 call syscall_trace_enter
6781+
6782+ pax_erase_kstack
6783+
6784 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6785 RESTORE_REST
6786 cmpq $(IA32_NR_syscalls-1),%rax
6787diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_signal.c linux-2.6.32.42/arch/x86/ia32/ia32_signal.c
6788--- linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6789+++ linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6790@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6791 sp -= frame_size;
6792 /* Align the stack pointer according to the i386 ABI,
6793 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6794- sp = ((sp + 4) & -16ul) - 4;
6795+ sp = ((sp - 12) & -16ul) - 4;
6796 return (void __user *) sp;
6797 }
6798
6799@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6800 * These are actually not used anymore, but left because some
6801 * gdb versions depend on them as a marker.
6802 */
6803- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6804+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6805 } put_user_catch(err);
6806
6807 if (err)
6808@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6809 0xb8,
6810 __NR_ia32_rt_sigreturn,
6811 0x80cd,
6812- 0,
6813+ 0
6814 };
6815
6816 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6817@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6818
6819 if (ka->sa.sa_flags & SA_RESTORER)
6820 restorer = ka->sa.sa_restorer;
6821+ else if (current->mm->context.vdso)
6822+ /* Return stub is in 32bit vsyscall page */
6823+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6824 else
6825- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6826- rt_sigreturn);
6827+ restorer = &frame->retcode;
6828 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6829
6830 /*
6831 * Not actually used anymore, but left because some gdb
6832 * versions need it.
6833 */
6834- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6835+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6836 } put_user_catch(err);
6837
6838 if (err)
6839diff -urNp linux-2.6.32.42/arch/x86/include/asm/alternative.h linux-2.6.32.42/arch/x86/include/asm/alternative.h
6840--- linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6841+++ linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6842@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6843 " .byte 662b-661b\n" /* sourcelen */ \
6844 " .byte 664f-663f\n" /* replacementlen */ \
6845 ".previous\n" \
6846- ".section .altinstr_replacement, \"ax\"\n" \
6847+ ".section .altinstr_replacement, \"a\"\n" \
6848 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6849 ".previous"
6850
6851diff -urNp linux-2.6.32.42/arch/x86/include/asm/apm.h linux-2.6.32.42/arch/x86/include/asm/apm.h
6852--- linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6853+++ linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6854@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6855 __asm__ __volatile__(APM_DO_ZERO_SEGS
6856 "pushl %%edi\n\t"
6857 "pushl %%ebp\n\t"
6858- "lcall *%%cs:apm_bios_entry\n\t"
6859+ "lcall *%%ss:apm_bios_entry\n\t"
6860 "setc %%al\n\t"
6861 "popl %%ebp\n\t"
6862 "popl %%edi\n\t"
6863@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6864 __asm__ __volatile__(APM_DO_ZERO_SEGS
6865 "pushl %%edi\n\t"
6866 "pushl %%ebp\n\t"
6867- "lcall *%%cs:apm_bios_entry\n\t"
6868+ "lcall *%%ss:apm_bios_entry\n\t"
6869 "setc %%bl\n\t"
6870 "popl %%ebp\n\t"
6871 "popl %%edi\n\t"
6872diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_32.h linux-2.6.32.42/arch/x86/include/asm/atomic_32.h
6873--- linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6874+++ linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6875@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6876 }
6877
6878 /**
6879+ * atomic_read_unchecked - read atomic variable
6880+ * @v: pointer of type atomic_unchecked_t
6881+ *
6882+ * Atomically reads the value of @v.
6883+ */
6884+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6885+{
6886+ return v->counter;
6887+}
6888+
6889+/**
6890 * atomic_set - set atomic variable
6891 * @v: pointer of type atomic_t
6892 * @i: required value
6893@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6894 }
6895
6896 /**
6897+ * atomic_set_unchecked - set atomic variable
6898+ * @v: pointer of type atomic_unchecked_t
6899+ * @i: required value
6900+ *
6901+ * Atomically sets the value of @v to @i.
6902+ */
6903+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6904+{
6905+ v->counter = i;
6906+}
6907+
6908+/**
6909 * atomic_add - add integer to atomic variable
6910 * @i: integer value to add
6911 * @v: pointer of type atomic_t
6912@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6913 */
6914 static inline void atomic_add(int i, atomic_t *v)
6915 {
6916- asm volatile(LOCK_PREFIX "addl %1,%0"
6917+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6918+
6919+#ifdef CONFIG_PAX_REFCOUNT
6920+ "jno 0f\n"
6921+ LOCK_PREFIX "subl %1,%0\n"
6922+ "int $4\n0:\n"
6923+ _ASM_EXTABLE(0b, 0b)
6924+#endif
6925+
6926+ : "+m" (v->counter)
6927+ : "ir" (i));
6928+}
6929+
6930+/**
6931+ * atomic_add_unchecked - add integer to atomic variable
6932+ * @i: integer value to add
6933+ * @v: pointer of type atomic_unchecked_t
6934+ *
6935+ * Atomically adds @i to @v.
6936+ */
6937+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6938+{
6939+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6940 : "+m" (v->counter)
6941 : "ir" (i));
6942 }
6943@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6944 */
6945 static inline void atomic_sub(int i, atomic_t *v)
6946 {
6947- asm volatile(LOCK_PREFIX "subl %1,%0"
6948+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6949+
6950+#ifdef CONFIG_PAX_REFCOUNT
6951+ "jno 0f\n"
6952+ LOCK_PREFIX "addl %1,%0\n"
6953+ "int $4\n0:\n"
6954+ _ASM_EXTABLE(0b, 0b)
6955+#endif
6956+
6957+ : "+m" (v->counter)
6958+ : "ir" (i));
6959+}
6960+
6961+/**
6962+ * atomic_sub_unchecked - subtract integer from atomic variable
6963+ * @i: integer value to subtract
6964+ * @v: pointer of type atomic_unchecked_t
6965+ *
6966+ * Atomically subtracts @i from @v.
6967+ */
6968+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6969+{
6970+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6971 : "+m" (v->counter)
6972 : "ir" (i));
6973 }
6974@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6975 {
6976 unsigned char c;
6977
6978- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6979+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6980+
6981+#ifdef CONFIG_PAX_REFCOUNT
6982+ "jno 0f\n"
6983+ LOCK_PREFIX "addl %2,%0\n"
6984+ "int $4\n0:\n"
6985+ _ASM_EXTABLE(0b, 0b)
6986+#endif
6987+
6988+ "sete %1\n"
6989 : "+m" (v->counter), "=qm" (c)
6990 : "ir" (i) : "memory");
6991 return c;
6992@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6993 */
6994 static inline void atomic_inc(atomic_t *v)
6995 {
6996- asm volatile(LOCK_PREFIX "incl %0"
6997+ asm volatile(LOCK_PREFIX "incl %0\n"
6998+
6999+#ifdef CONFIG_PAX_REFCOUNT
7000+ "jno 0f\n"
7001+ LOCK_PREFIX "decl %0\n"
7002+ "int $4\n0:\n"
7003+ _ASM_EXTABLE(0b, 0b)
7004+#endif
7005+
7006+ : "+m" (v->counter));
7007+}
7008+
7009+/**
7010+ * atomic_inc_unchecked - increment atomic variable
7011+ * @v: pointer of type atomic_unchecked_t
7012+ *
7013+ * Atomically increments @v by 1.
7014+ */
7015+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7016+{
7017+ asm volatile(LOCK_PREFIX "incl %0\n"
7018 : "+m" (v->counter));
7019 }
7020
7021@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7022 */
7023 static inline void atomic_dec(atomic_t *v)
7024 {
7025- asm volatile(LOCK_PREFIX "decl %0"
7026+ asm volatile(LOCK_PREFIX "decl %0\n"
7027+
7028+#ifdef CONFIG_PAX_REFCOUNT
7029+ "jno 0f\n"
7030+ LOCK_PREFIX "incl %0\n"
7031+ "int $4\n0:\n"
7032+ _ASM_EXTABLE(0b, 0b)
7033+#endif
7034+
7035+ : "+m" (v->counter));
7036+}
7037+
7038+/**
7039+ * atomic_dec_unchecked - decrement atomic variable
7040+ * @v: pointer of type atomic_unchecked_t
7041+ *
7042+ * Atomically decrements @v by 1.
7043+ */
7044+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7045+{
7046+ asm volatile(LOCK_PREFIX "decl %0\n"
7047 : "+m" (v->counter));
7048 }
7049
7050@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7051 {
7052 unsigned char c;
7053
7054- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7055+ asm volatile(LOCK_PREFIX "decl %0\n"
7056+
7057+#ifdef CONFIG_PAX_REFCOUNT
7058+ "jno 0f\n"
7059+ LOCK_PREFIX "incl %0\n"
7060+ "int $4\n0:\n"
7061+ _ASM_EXTABLE(0b, 0b)
7062+#endif
7063+
7064+ "sete %1\n"
7065 : "+m" (v->counter), "=qm" (c)
7066 : : "memory");
7067 return c != 0;
7068@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7069 {
7070 unsigned char c;
7071
7072- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7073+ asm volatile(LOCK_PREFIX "incl %0\n"
7074+
7075+#ifdef CONFIG_PAX_REFCOUNT
7076+ "jno 0f\n"
7077+ LOCK_PREFIX "decl %0\n"
7078+ "into\n0:\n"
7079+ _ASM_EXTABLE(0b, 0b)
7080+#endif
7081+
7082+ "sete %1\n"
7083+ : "+m" (v->counter), "=qm" (c)
7084+ : : "memory");
7085+ return c != 0;
7086+}
7087+
7088+/**
7089+ * atomic_inc_and_test_unchecked - increment and test
7090+ * @v: pointer of type atomic_unchecked_t
7091+ *
7092+ * Atomically increments @v by 1
7093+ * and returns true if the result is zero, or false for all
7094+ * other cases.
7095+ */
7096+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7097+{
7098+ unsigned char c;
7099+
7100+ asm volatile(LOCK_PREFIX "incl %0\n"
7101+ "sete %1\n"
7102 : "+m" (v->counter), "=qm" (c)
7103 : : "memory");
7104 return c != 0;
7105@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7106 {
7107 unsigned char c;
7108
7109- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7110+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7111+
7112+#ifdef CONFIG_PAX_REFCOUNT
7113+ "jno 0f\n"
7114+ LOCK_PREFIX "subl %2,%0\n"
7115+ "int $4\n0:\n"
7116+ _ASM_EXTABLE(0b, 0b)
7117+#endif
7118+
7119+ "sets %1\n"
7120 : "+m" (v->counter), "=qm" (c)
7121 : "ir" (i) : "memory");
7122 return c;
7123@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7124 #endif
7125 /* Modern 486+ processor */
7126 __i = i;
7127+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7128+
7129+#ifdef CONFIG_PAX_REFCOUNT
7130+ "jno 0f\n"
7131+ "movl %0, %1\n"
7132+ "int $4\n0:\n"
7133+ _ASM_EXTABLE(0b, 0b)
7134+#endif
7135+
7136+ : "+r" (i), "+m" (v->counter)
7137+ : : "memory");
7138+ return i + __i;
7139+
7140+#ifdef CONFIG_M386
7141+no_xadd: /* Legacy 386 processor */
7142+ local_irq_save(flags);
7143+ __i = atomic_read(v);
7144+ atomic_set(v, i + __i);
7145+ local_irq_restore(flags);
7146+ return i + __i;
7147+#endif
7148+}
7149+
7150+/**
7151+ * atomic_add_return_unchecked - add integer and return
7152+ * @v: pointer of type atomic_unchecked_t
7153+ * @i: integer value to add
7154+ *
7155+ * Atomically adds @i to @v and returns @i + @v
7156+ */
7157+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7158+{
7159+ int __i;
7160+#ifdef CONFIG_M386
7161+ unsigned long flags;
7162+ if (unlikely(boot_cpu_data.x86 <= 3))
7163+ goto no_xadd;
7164+#endif
7165+ /* Modern 486+ processor */
7166+ __i = i;
7167 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7168 : "+r" (i), "+m" (v->counter)
7169 : : "memory");
7170@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7171 return cmpxchg(&v->counter, old, new);
7172 }
7173
7174+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7175+{
7176+ return cmpxchg(&v->counter, old, new);
7177+}
7178+
7179 static inline int atomic_xchg(atomic_t *v, int new)
7180 {
7181 return xchg(&v->counter, new);
7182 }
7183
7184+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7185+{
7186+ return xchg(&v->counter, new);
7187+}
7188+
7189 /**
7190 * atomic_add_unless - add unless the number is already a given value
7191 * @v: pointer of type atomic_t
7192@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7193 */
7194 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7195 {
7196- int c, old;
7197+ int c, old, new;
7198 c = atomic_read(v);
7199 for (;;) {
7200- if (unlikely(c == (u)))
7201+ if (unlikely(c == u))
7202 break;
7203- old = atomic_cmpxchg((v), c, c + (a));
7204+
7205+ asm volatile("addl %2,%0\n"
7206+
7207+#ifdef CONFIG_PAX_REFCOUNT
7208+ "jno 0f\n"
7209+ "subl %2,%0\n"
7210+ "int $4\n0:\n"
7211+ _ASM_EXTABLE(0b, 0b)
7212+#endif
7213+
7214+ : "=r" (new)
7215+ : "0" (c), "ir" (a));
7216+
7217+ old = atomic_cmpxchg(v, c, new);
7218 if (likely(old == c))
7219 break;
7220 c = old;
7221 }
7222- return c != (u);
7223+ return c != u;
7224 }
7225
7226 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7227
7228 #define atomic_inc_return(v) (atomic_add_return(1, v))
7229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7230+{
7231+ return atomic_add_return_unchecked(1, v);
7232+}
7233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7234
7235 /* These are x86-specific, used by some header files */
7236@@ -266,9 +495,18 @@ typedef struct {
7237 u64 __aligned(8) counter;
7238 } atomic64_t;
7239
7240+#ifdef CONFIG_PAX_REFCOUNT
7241+typedef struct {
7242+ u64 __aligned(8) counter;
7243+} atomic64_unchecked_t;
7244+#else
7245+typedef atomic64_t atomic64_unchecked_t;
7246+#endif
7247+
7248 #define ATOMIC64_INIT(val) { (val) }
7249
7250 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7251+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7252
7253 /**
7254 * atomic64_xchg - xchg atomic64 variable
7255@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7256 * the old value.
7257 */
7258 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7259+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7260
7261 /**
7262 * atomic64_set - set atomic64 variable
7263@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7264 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7265
7266 /**
7267+ * atomic64_unchecked_set - set atomic64 variable
7268+ * @ptr: pointer to type atomic64_unchecked_t
7269+ * @new_val: value to assign
7270+ *
7271+ * Atomically sets the value of @ptr to @new_val.
7272+ */
7273+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7274+
7275+/**
7276 * atomic64_read - read atomic64 variable
7277 * @ptr: pointer to type atomic64_t
7278 *
7279@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7280 return res;
7281 }
7282
7283-extern u64 atomic64_read(atomic64_t *ptr);
7284+/**
7285+ * atomic64_read_unchecked - read atomic64 variable
7286+ * @ptr: pointer to type atomic64_unchecked_t
7287+ *
7288+ * Atomically reads the value of @ptr and returns it.
7289+ */
7290+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7291+{
7292+ u64 res;
7293+
7294+ /*
7295+ * Note, we inline this atomic64_unchecked_t primitive because
7296+ * it only clobbers EAX/EDX and leaves the others
7297+ * untouched. We also (somewhat subtly) rely on the
7298+ * fact that cmpxchg8b returns the current 64-bit value
7299+ * of the memory location we are touching:
7300+ */
7301+ asm volatile(
7302+ "mov %%ebx, %%eax\n\t"
7303+ "mov %%ecx, %%edx\n\t"
7304+ LOCK_PREFIX "cmpxchg8b %1\n"
7305+ : "=&A" (res)
7306+ : "m" (*ptr)
7307+ );
7308+
7309+ return res;
7310+}
7311
7312 /**
7313 * atomic64_add_return - add and return
7314@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7315 * Other variants with different arithmetic operators:
7316 */
7317 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7318+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7319 extern u64 atomic64_inc_return(atomic64_t *ptr);
7320+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7321 extern u64 atomic64_dec_return(atomic64_t *ptr);
7322+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7323
7324 /**
7325 * atomic64_add - add integer to atomic64 variable
7326@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7327 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7328
7329 /**
7330+ * atomic64_add_unchecked - add integer to atomic64 variable
7331+ * @delta: integer value to add
7332+ * @ptr: pointer to type atomic64_unchecked_t
7333+ *
7334+ * Atomically adds @delta to @ptr.
7335+ */
7336+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7337+
7338+/**
7339 * atomic64_sub - subtract the atomic64 variable
7340 * @delta: integer value to subtract
7341 * @ptr: pointer to type atomic64_t
7342@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7343 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7344
7345 /**
7346+ * atomic64_sub_unchecked - subtract the atomic64 variable
7347+ * @delta: integer value to subtract
7348+ * @ptr: pointer to type atomic64_unchecked_t
7349+ *
7350+ * Atomically subtracts @delta from @ptr.
7351+ */
7352+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7353+
7354+/**
7355 * atomic64_sub_and_test - subtract value from variable and test result
7356 * @delta: integer value to subtract
7357 * @ptr: pointer to type atomic64_t
7358@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7359 extern void atomic64_inc(atomic64_t *ptr);
7360
7361 /**
7362+ * atomic64_inc_unchecked - increment atomic64 variable
7363+ * @ptr: pointer to type atomic64_unchecked_t
7364+ *
7365+ * Atomically increments @ptr by 1.
7366+ */
7367+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7368+
7369+/**
7370 * atomic64_dec - decrement atomic64 variable
7371 * @ptr: pointer to type atomic64_t
7372 *
7373@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7374 extern void atomic64_dec(atomic64_t *ptr);
7375
7376 /**
7377+ * atomic64_dec_unchecked - decrement atomic64 variable
7378+ * @ptr: pointer to type atomic64_unchecked_t
7379+ *
7380+ * Atomically decrements @ptr by 1.
7381+ */
7382+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7383+
7384+/**
7385 * atomic64_dec_and_test - decrement and test
7386 * @ptr: pointer to type atomic64_t
7387 *
7388diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_64.h linux-2.6.32.42/arch/x86/include/asm/atomic_64.h
7389--- linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7390+++ linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7391@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7392 }
7393
7394 /**
7395+ * atomic_read_unchecked - read atomic variable
7396+ * @v: pointer of type atomic_unchecked_t
7397+ *
7398+ * Atomically reads the value of @v.
7399+ */
7400+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7401+{
7402+ return v->counter;
7403+}
7404+
7405+/**
7406 * atomic_set - set atomic variable
7407 * @v: pointer of type atomic_t
7408 * @i: required value
7409@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7410 }
7411
7412 /**
7413+ * atomic_set_unchecked - set atomic variable
7414+ * @v: pointer of type atomic_unchecked_t
7415+ * @i: required value
7416+ *
7417+ * Atomically sets the value of @v to @i.
7418+ */
7419+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7420+{
7421+ v->counter = i;
7422+}
7423+
7424+/**
7425 * atomic_add - add integer to atomic variable
7426 * @i: integer value to add
7427 * @v: pointer of type atomic_t
7428@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7429 */
7430 static inline void atomic_add(int i, atomic_t *v)
7431 {
7432- asm volatile(LOCK_PREFIX "addl %1,%0"
7433+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ LOCK_PREFIX "subl %1,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ : "=m" (v->counter)
7443+ : "ir" (i), "m" (v->counter));
7444+}
7445+
7446+/**
7447+ * atomic_add_unchecked - add integer to atomic variable
7448+ * @i: integer value to add
7449+ * @v: pointer of type atomic_unchecked_t
7450+ *
7451+ * Atomically adds @i to @v.
7452+ */
7453+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7454+{
7455+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7456 : "=m" (v->counter)
7457 : "ir" (i), "m" (v->counter));
7458 }
7459@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7460 */
7461 static inline void atomic_sub(int i, atomic_t *v)
7462 {
7463- asm volatile(LOCK_PREFIX "subl %1,%0"
7464+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7465+
7466+#ifdef CONFIG_PAX_REFCOUNT
7467+ "jno 0f\n"
7468+ LOCK_PREFIX "addl %1,%0\n"
7469+ "int $4\n0:\n"
7470+ _ASM_EXTABLE(0b, 0b)
7471+#endif
7472+
7473+ : "=m" (v->counter)
7474+ : "ir" (i), "m" (v->counter));
7475+}
7476+
7477+/**
7478+ * atomic_sub_unchecked - subtract the atomic variable
7479+ * @i: integer value to subtract
7480+ * @v: pointer of type atomic_unchecked_t
7481+ *
7482+ * Atomically subtracts @i from @v.
7483+ */
7484+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7485+{
7486+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7487 : "=m" (v->counter)
7488 : "ir" (i), "m" (v->counter));
7489 }
7490@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7491 {
7492 unsigned char c;
7493
7494- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7495+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7496+
7497+#ifdef CONFIG_PAX_REFCOUNT
7498+ "jno 0f\n"
7499+ LOCK_PREFIX "addl %2,%0\n"
7500+ "int $4\n0:\n"
7501+ _ASM_EXTABLE(0b, 0b)
7502+#endif
7503+
7504+ "sete %1\n"
7505 : "=m" (v->counter), "=qm" (c)
7506 : "ir" (i), "m" (v->counter) : "memory");
7507 return c;
7508@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7509 */
7510 static inline void atomic_inc(atomic_t *v)
7511 {
7512- asm volatile(LOCK_PREFIX "incl %0"
7513+ asm volatile(LOCK_PREFIX "incl %0\n"
7514+
7515+#ifdef CONFIG_PAX_REFCOUNT
7516+ "jno 0f\n"
7517+ LOCK_PREFIX "decl %0\n"
7518+ "int $4\n0:\n"
7519+ _ASM_EXTABLE(0b, 0b)
7520+#endif
7521+
7522+ : "=m" (v->counter)
7523+ : "m" (v->counter));
7524+}
7525+
7526+/**
7527+ * atomic_inc_unchecked - increment atomic variable
7528+ * @v: pointer of type atomic_unchecked_t
7529+ *
7530+ * Atomically increments @v by 1.
7531+ */
7532+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7533+{
7534+ asm volatile(LOCK_PREFIX "incl %0\n"
7535 : "=m" (v->counter)
7536 : "m" (v->counter));
7537 }
7538@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7539 */
7540 static inline void atomic_dec(atomic_t *v)
7541 {
7542- asm volatile(LOCK_PREFIX "decl %0"
7543+ asm volatile(LOCK_PREFIX "decl %0\n"
7544+
7545+#ifdef CONFIG_PAX_REFCOUNT
7546+ "jno 0f\n"
7547+ LOCK_PREFIX "incl %0\n"
7548+ "int $4\n0:\n"
7549+ _ASM_EXTABLE(0b, 0b)
7550+#endif
7551+
7552+ : "=m" (v->counter)
7553+ : "m" (v->counter));
7554+}
7555+
7556+/**
7557+ * atomic_dec_unchecked - decrement atomic variable
7558+ * @v: pointer of type atomic_unchecked_t
7559+ *
7560+ * Atomically decrements @v by 1.
7561+ */
7562+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7563+{
7564+ asm volatile(LOCK_PREFIX "decl %0\n"
7565 : "=m" (v->counter)
7566 : "m" (v->counter));
7567 }
7568@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7569 {
7570 unsigned char c;
7571
7572- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7573+ asm volatile(LOCK_PREFIX "decl %0\n"
7574+
7575+#ifdef CONFIG_PAX_REFCOUNT
7576+ "jno 0f\n"
7577+ LOCK_PREFIX "incl %0\n"
7578+ "int $4\n0:\n"
7579+ _ASM_EXTABLE(0b, 0b)
7580+#endif
7581+
7582+ "sete %1\n"
7583 : "=m" (v->counter), "=qm" (c)
7584 : "m" (v->counter) : "memory");
7585 return c != 0;
7586@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7587 {
7588 unsigned char c;
7589
7590- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7591+ asm volatile(LOCK_PREFIX "incl %0\n"
7592+
7593+#ifdef CONFIG_PAX_REFCOUNT
7594+ "jno 0f\n"
7595+ LOCK_PREFIX "decl %0\n"
7596+ "int $4\n0:\n"
7597+ _ASM_EXTABLE(0b, 0b)
7598+#endif
7599+
7600+ "sete %1\n"
7601+ : "=m" (v->counter), "=qm" (c)
7602+ : "m" (v->counter) : "memory");
7603+ return c != 0;
7604+}
7605+
7606+/**
7607+ * atomic_inc_and_test_unchecked - increment and test
7608+ * @v: pointer of type atomic_unchecked_t
7609+ *
7610+ * Atomically increments @v by 1
7611+ * and returns true if the result is zero, or false for all
7612+ * other cases.
7613+ */
7614+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7615+{
7616+ unsigned char c;
7617+
7618+ asm volatile(LOCK_PREFIX "incl %0\n"
7619+ "sete %1\n"
7620 : "=m" (v->counter), "=qm" (c)
7621 : "m" (v->counter) : "memory");
7622 return c != 0;
7623@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7624 {
7625 unsigned char c;
7626
7627- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7628+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7629+
7630+#ifdef CONFIG_PAX_REFCOUNT
7631+ "jno 0f\n"
7632+ LOCK_PREFIX "subl %2,%0\n"
7633+ "int $4\n0:\n"
7634+ _ASM_EXTABLE(0b, 0b)
7635+#endif
7636+
7637+ "sets %1\n"
7638 : "=m" (v->counter), "=qm" (c)
7639 : "ir" (i), "m" (v->counter) : "memory");
7640 return c;
7641@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7642 static inline int atomic_add_return(int i, atomic_t *v)
7643 {
7644 int __i = i;
7645- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7646+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7647+
7648+#ifdef CONFIG_PAX_REFCOUNT
7649+ "jno 0f\n"
7650+ "movl %0, %1\n"
7651+ "int $4\n0:\n"
7652+ _ASM_EXTABLE(0b, 0b)
7653+#endif
7654+
7655+ : "+r" (i), "+m" (v->counter)
7656+ : : "memory");
7657+ return i + __i;
7658+}
7659+
7660+/**
7661+ * atomic_add_return_unchecked - add and return
7662+ * @i: integer value to add
7663+ * @v: pointer of type atomic_unchecked_t
7664+ *
7665+ * Atomically adds @i to @v and returns @i + @v
7666+ */
7667+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7668+{
7669+ int __i = i;
7670+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7671 : "+r" (i), "+m" (v->counter)
7672 : : "memory");
7673 return i + __i;
7674@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7675 }
7676
7677 #define atomic_inc_return(v) (atomic_add_return(1, v))
7678+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7679+{
7680+ return atomic_add_return_unchecked(1, v);
7681+}
7682 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7683
7684 /* The 64-bit atomic type */
7685@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7686 }
7687
7688 /**
7689+ * atomic64_read_unchecked - read atomic64 variable
7690+ * @v: pointer of type atomic64_unchecked_t
7691+ *
7692+ * Atomically reads the value of @v.
7693+ * Doesn't imply a read memory barrier.
7694+ */
7695+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7696+{
7697+ return v->counter;
7698+}
7699+
7700+/**
7701 * atomic64_set - set atomic64 variable
7702 * @v: pointer to type atomic64_t
7703 * @i: required value
7704@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7705 }
7706
7707 /**
7708+ * atomic64_set_unchecked - set atomic64 variable
7709+ * @v: pointer to type atomic64_unchecked_t
7710+ * @i: required value
7711+ *
7712+ * Atomically sets the value of @v to @i.
7713+ */
7714+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7715+{
7716+ v->counter = i;
7717+}
7718+
7719+/**
7720 * atomic64_add - add integer to atomic64 variable
7721 * @i: integer value to add
7722 * @v: pointer to type atomic64_t
7723@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7724 */
7725 static inline void atomic64_add(long i, atomic64_t *v)
7726 {
7727+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7728+
7729+#ifdef CONFIG_PAX_REFCOUNT
7730+ "jno 0f\n"
7731+ LOCK_PREFIX "subq %1,%0\n"
7732+ "int $4\n0:\n"
7733+ _ASM_EXTABLE(0b, 0b)
7734+#endif
7735+
7736+ : "=m" (v->counter)
7737+ : "er" (i), "m" (v->counter));
7738+}
7739+
7740+/**
7741+ * atomic64_add_unchecked - add integer to atomic64 variable
7742+ * @i: integer value to add
7743+ * @v: pointer to type atomic64_unchecked_t
7744+ *
7745+ * Atomically adds @i to @v.
7746+ */
7747+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7748+{
7749 asm volatile(LOCK_PREFIX "addq %1,%0"
7750 : "=m" (v->counter)
7751 : "er" (i), "m" (v->counter));
7752@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7753 */
7754 static inline void atomic64_sub(long i, atomic64_t *v)
7755 {
7756- asm volatile(LOCK_PREFIX "subq %1,%0"
7757+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7758+
7759+#ifdef CONFIG_PAX_REFCOUNT
7760+ "jno 0f\n"
7761+ LOCK_PREFIX "addq %1,%0\n"
7762+ "int $4\n0:\n"
7763+ _ASM_EXTABLE(0b, 0b)
7764+#endif
7765+
7766 : "=m" (v->counter)
7767 : "er" (i), "m" (v->counter));
7768 }
7769@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7770 {
7771 unsigned char c;
7772
7773- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7774+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7775+
7776+#ifdef CONFIG_PAX_REFCOUNT
7777+ "jno 0f\n"
7778+ LOCK_PREFIX "addq %2,%0\n"
7779+ "int $4\n0:\n"
7780+ _ASM_EXTABLE(0b, 0b)
7781+#endif
7782+
7783+ "sete %1\n"
7784 : "=m" (v->counter), "=qm" (c)
7785 : "er" (i), "m" (v->counter) : "memory");
7786 return c;
7787@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7788 */
7789 static inline void atomic64_inc(atomic64_t *v)
7790 {
7791+ asm volatile(LOCK_PREFIX "incq %0\n"
7792+
7793+#ifdef CONFIG_PAX_REFCOUNT
7794+ "jno 0f\n"
7795+ LOCK_PREFIX "decq %0\n"
7796+ "int $4\n0:\n"
7797+ _ASM_EXTABLE(0b, 0b)
7798+#endif
7799+
7800+ : "=m" (v->counter)
7801+ : "m" (v->counter));
7802+}
7803+
7804+/**
7805+ * atomic64_inc_unchecked - increment atomic64 variable
7806+ * @v: pointer to type atomic64_unchecked_t
7807+ *
7808+ * Atomically increments @v by 1.
7809+ */
7810+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7811+{
7812 asm volatile(LOCK_PREFIX "incq %0"
7813 : "=m" (v->counter)
7814 : "m" (v->counter));
7815@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7816 */
7817 static inline void atomic64_dec(atomic64_t *v)
7818 {
7819- asm volatile(LOCK_PREFIX "decq %0"
7820+ asm volatile(LOCK_PREFIX "decq %0\n"
7821+
7822+#ifdef CONFIG_PAX_REFCOUNT
7823+ "jno 0f\n"
7824+ LOCK_PREFIX "incq %0\n"
7825+ "int $4\n0:\n"
7826+ _ASM_EXTABLE(0b, 0b)
7827+#endif
7828+
7829+ : "=m" (v->counter)
7830+ : "m" (v->counter));
7831+}
7832+
7833+/**
7834+ * atomic64_dec_unchecked - decrement atomic64 variable
7835+ * @v: pointer to type atomic64_t
7836+ *
7837+ * Atomically decrements @v by 1.
7838+ */
7839+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7840+{
7841+ asm volatile(LOCK_PREFIX "decq %0\n"
7842 : "=m" (v->counter)
7843 : "m" (v->counter));
7844 }
7845@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7846 {
7847 unsigned char c;
7848
7849- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7850+ asm volatile(LOCK_PREFIX "decq %0\n"
7851+
7852+#ifdef CONFIG_PAX_REFCOUNT
7853+ "jno 0f\n"
7854+ LOCK_PREFIX "incq %0\n"
7855+ "int $4\n0:\n"
7856+ _ASM_EXTABLE(0b, 0b)
7857+#endif
7858+
7859+ "sete %1\n"
7860 : "=m" (v->counter), "=qm" (c)
7861 : "m" (v->counter) : "memory");
7862 return c != 0;
7863@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7864 {
7865 unsigned char c;
7866
7867- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7868+ asm volatile(LOCK_PREFIX "incq %0\n"
7869+
7870+#ifdef CONFIG_PAX_REFCOUNT
7871+ "jno 0f\n"
7872+ LOCK_PREFIX "decq %0\n"
7873+ "int $4\n0:\n"
7874+ _ASM_EXTABLE(0b, 0b)
7875+#endif
7876+
7877+ "sete %1\n"
7878 : "=m" (v->counter), "=qm" (c)
7879 : "m" (v->counter) : "memory");
7880 return c != 0;
7881@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7882 {
7883 unsigned char c;
7884
7885- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7886+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7887+
7888+#ifdef CONFIG_PAX_REFCOUNT
7889+ "jno 0f\n"
7890+ LOCK_PREFIX "subq %2,%0\n"
7891+ "int $4\n0:\n"
7892+ _ASM_EXTABLE(0b, 0b)
7893+#endif
7894+
7895+ "sets %1\n"
7896 : "=m" (v->counter), "=qm" (c)
7897 : "er" (i), "m" (v->counter) : "memory");
7898 return c;
7899@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7900 static inline long atomic64_add_return(long i, atomic64_t *v)
7901 {
7902 long __i = i;
7903- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7904+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7905+
7906+#ifdef CONFIG_PAX_REFCOUNT
7907+ "jno 0f\n"
7908+ "movq %0, %1\n"
7909+ "int $4\n0:\n"
7910+ _ASM_EXTABLE(0b, 0b)
7911+#endif
7912+
7913+ : "+r" (i), "+m" (v->counter)
7914+ : : "memory");
7915+ return i + __i;
7916+}
7917+
7918+/**
7919+ * atomic64_add_return_unchecked - add and return
7920+ * @i: integer value to add
7921+ * @v: pointer to type atomic64_unchecked_t
7922+ *
7923+ * Atomically adds @i to @v and returns @i + @v
7924+ */
7925+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7926+{
7927+ long __i = i;
7928+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7929 : "+r" (i), "+m" (v->counter)
7930 : : "memory");
7931 return i + __i;
7932@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7933 }
7934
7935 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7936+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7937+{
7938+ return atomic64_add_return_unchecked(1, v);
7939+}
7940 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7941
7942 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7943@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7944 return cmpxchg(&v->counter, old, new);
7945 }
7946
7947+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7948+{
7949+ return cmpxchg(&v->counter, old, new);
7950+}
7951+
7952 static inline long atomic64_xchg(atomic64_t *v, long new)
7953 {
7954 return xchg(&v->counter, new);
7955 }
7956
7957+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7958+{
7959+ return xchg(&v->counter, new);
7960+}
7961+
7962 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7963 {
7964 return cmpxchg(&v->counter, old, new);
7965 }
7966
7967+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7968+{
7969+ return cmpxchg(&v->counter, old, new);
7970+}
7971+
7972 static inline long atomic_xchg(atomic_t *v, int new)
7973 {
7974 return xchg(&v->counter, new);
7975 }
7976
7977+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7978+{
7979+ return xchg(&v->counter, new);
7980+}
7981+
7982 /**
7983 * atomic_add_unless - add unless the number is a given value
7984 * @v: pointer of type atomic_t
7985@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7986 */
7987 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7988 {
7989- int c, old;
7990+ int c, old, new;
7991 c = atomic_read(v);
7992 for (;;) {
7993- if (unlikely(c == (u)))
7994+ if (unlikely(c == u))
7995 break;
7996- old = atomic_cmpxchg((v), c, c + (a));
7997+
7998+ asm volatile("addl %2,%0\n"
7999+
8000+#ifdef CONFIG_PAX_REFCOUNT
8001+ "jno 0f\n"
8002+ "subl %2,%0\n"
8003+ "int $4\n0:\n"
8004+ _ASM_EXTABLE(0b, 0b)
8005+#endif
8006+
8007+ : "=r" (new)
8008+ : "0" (c), "ir" (a));
8009+
8010+ old = atomic_cmpxchg(v, c, new);
8011 if (likely(old == c))
8012 break;
8013 c = old;
8014 }
8015- return c != (u);
8016+ return c != u;
8017 }
8018
8019 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8020@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8021 */
8022 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8023 {
8024- long c, old;
8025+ long c, old, new;
8026 c = atomic64_read(v);
8027 for (;;) {
8028- if (unlikely(c == (u)))
8029+ if (unlikely(c == u))
8030 break;
8031- old = atomic64_cmpxchg((v), c, c + (a));
8032+
8033+ asm volatile("addq %2,%0\n"
8034+
8035+#ifdef CONFIG_PAX_REFCOUNT
8036+ "jno 0f\n"
8037+ "subq %2,%0\n"
8038+ "int $4\n0:\n"
8039+ _ASM_EXTABLE(0b, 0b)
8040+#endif
8041+
8042+ : "=r" (new)
8043+ : "0" (c), "er" (a));
8044+
8045+ old = atomic64_cmpxchg(v, c, new);
8046 if (likely(old == c))
8047 break;
8048 c = old;
8049 }
8050- return c != (u);
8051+ return c != u;
8052 }
8053
8054 /**
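Note on the hunk above: the reworked atomic_add_unless()/atomic64_add_unless() keep the usual compare-and-swap retry loop but compute the candidate value with an overflow check instead of a plain c + a. The following user-space sketch is only an analogue, not kernel code: it substitutes C11 <stdatomic.h> and the GCC/Clang __builtin_add_overflow helper for the LOCK-prefixed inline asm and the int $4 PAX_REFCOUNT trap, but it has the same structure (compute the new value, refuse on overflow, retry the cmpxchg until it lands).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

/* Analogue of atomic_add_unless(): add @a to *@v unless *@v == @u.
 * Returns true if the add happened.  Overflow refuses the attempt here,
 * where the kernel hunk would raise the PAX_REFCOUNT trap instead. */
static bool add_unless(atomic_int *v, int a, int u)
{
    int c = atomic_load(v);

    for (;;) {
        int new;

        if (c == u)
            return false;               /* caller asked us to skip this value */

        if (__builtin_add_overflow(c, a, &new))
            return false;               /* would overflow: bail out, like the jno path */

        /* cmpxchg: succeeds only if *v still equals c; on failure c is
         * refreshed with the current value and we go around again. */
        if (atomic_compare_exchange_weak(v, &c, new))
            return true;
    }
}

int main(void)
{
    atomic_int refs = 1;

    printf("%d\n", add_unless(&refs, 1, 0));    /* 1: 1 -> 2 */
    atomic_store(&refs, 0);
    printf("%d\n", add_unless(&refs, 1, 0));    /* 0: refused, counter was 0 */
    atomic_store(&refs, INT_MAX);
    printf("%d\n", add_unless(&refs, 1, 0));    /* 0: refused, would overflow */
    return 0;
}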
8055diff -urNp linux-2.6.32.42/arch/x86/include/asm/bitops.h linux-2.6.32.42/arch/x86/include/asm/bitops.h
8056--- linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8057+++ linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8058@@ -38,7 +38,7 @@
8059 * a mask operation on a byte.
8060 */
8061 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8062-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8063+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8064 #define CONST_MASK(nr) (1 << ((nr) & 7))
8065
8066 /**
8067diff -urNp linux-2.6.32.42/arch/x86/include/asm/boot.h linux-2.6.32.42/arch/x86/include/asm/boot.h
8068--- linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8069+++ linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8070@@ -11,10 +11,15 @@
8071 #include <asm/pgtable_types.h>
8072
8073 /* Physical address where kernel should be loaded. */
8074-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8075+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8078
8079+#ifndef __ASSEMBLY__
8080+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8081+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8082+#endif
8083+
8084 /* Minimum kernel alignment, as a power of two */
8085 #ifdef CONFIG_X86_64
8086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
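The ____LOAD_PHYSICAL_ADDR definition kept by this hunk is the classic power-of-two round-up, (x + (align - 1)) & ~(align - 1). A minimal sketch of that arithmetic follows; the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN values are stand-ins, real configurations differ.

#include <assert.h>
#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
static unsigned long align_up(unsigned long x, unsigned long align)
{
    return (x + (align - 1)) & ~(align - 1);
}

int main(void)
{
    /* Hypothetical stand-ins for CONFIG_PHYSICAL_START / CONFIG_PHYSICAL_ALIGN. */
    unsigned long phys_start = 0x100000;   /* 1 MiB */
    unsigned long phys_align = 0x200000;   /* 2 MiB */

    assert(align_up(phys_start, phys_align) == 0x200000);
    assert(align_up(0x200000, phys_align) == 0x200000);   /* already aligned */

    printf("LOAD_PHYSICAL_ADDR analogue: %#lx\n",
           align_up(phys_start, phys_align));
    return 0;
}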
8087diff -urNp linux-2.6.32.42/arch/x86/include/asm/cacheflush.h linux-2.6.32.42/arch/x86/include/asm/cacheflush.h
8088--- linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8089+++ linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8090@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8091 static inline unsigned long get_page_memtype(struct page *pg)
8092 {
8093 if (!PageUncached(pg) && !PageWC(pg))
8094- return -1;
8095+ return ~0UL;
8096 else if (!PageUncached(pg) && PageWC(pg))
8097 return _PAGE_CACHE_WC;
8098 else if (PageUncached(pg) && !PageWC(pg))
8099@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8100 SetPageWC(pg);
8101 break;
8102 default:
8103- case -1:
8104+ case ~0UL:
8105 ClearPageUncached(pg);
8106 ClearPageWC(pg);
8107 break;
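The get_page_memtype()/set_page_memtype() change above swaps a bare -1 for ~0UL in an unsigned long context. Both spellings produce the same value after implicit conversion; the rewrite only makes the sentinel's type explicit. A two-line check, for the record:

#include <stdio.h>

static unsigned long sentinel_old(void) { return -1; }    /* implicit conversion */
static unsigned long sentinel_new(void) { return ~0UL; }  /* explicit unsigned long */

int main(void)
{
    /* Both forms yield ULONG_MAX, so callers comparing against either one
     * behave identically; the patch just removes the signed/unsigned mix. */
    printf("%d\n", sentinel_old() == sentinel_new());      /* prints 1 */
    return 0;
}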
8108diff -urNp linux-2.6.32.42/arch/x86/include/asm/cache.h linux-2.6.32.42/arch/x86/include/asm/cache.h
8109--- linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8110+++ linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8111@@ -5,9 +5,10 @@
8112
8113 /* L1 cache line size */
8114 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8115-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8116+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8117
8118 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8119+#define __read_only __attribute__((__section__(".data.read_only")))
8120
8121 #ifdef CONFIG_X86_VSMP
8122 /* vSMP Internode cacheline shift */
8123diff -urNp linux-2.6.32.42/arch/x86/include/asm/checksum_32.h linux-2.6.32.42/arch/x86/include/asm/checksum_32.h
8124--- linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8125+++ linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8126@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8127 int len, __wsum sum,
8128 int *src_err_ptr, int *dst_err_ptr);
8129
8130+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8131+ int len, __wsum sum,
8132+ int *src_err_ptr, int *dst_err_ptr);
8133+
8134+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8135+ int len, __wsum sum,
8136+ int *src_err_ptr, int *dst_err_ptr);
8137+
8138 /*
8139 * Note: when you get a NULL pointer exception here this means someone
8140 * passed in an incorrect kernel address to one of these functions.
8141@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8142 int *err_ptr)
8143 {
8144 might_sleep();
8145- return csum_partial_copy_generic((__force void *)src, dst,
8146+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8147 len, sum, err_ptr, NULL);
8148 }
8149
8150@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8151 {
8152 might_sleep();
8153 if (access_ok(VERIFY_WRITE, dst, len))
8154- return csum_partial_copy_generic(src, (__force void *)dst,
8155+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8156 len, sum, NULL, err_ptr);
8157
8158 if (len)
8159diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc_defs.h linux-2.6.32.42/arch/x86/include/asm/desc_defs.h
8160--- linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8161+++ linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8162@@ -31,6 +31,12 @@ struct desc_struct {
8163 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8164 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8165 };
8166+ struct {
8167+ u16 offset_low;
8168+ u16 seg;
8169+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8170+ unsigned offset_high: 16;
8171+ } gate;
8172 };
8173 } __attribute__((packed));
8174
8175diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc.h linux-2.6.32.42/arch/x86/include/asm/desc.h
8176--- linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8177+++ linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8178@@ -4,6 +4,7 @@
8179 #include <asm/desc_defs.h>
8180 #include <asm/ldt.h>
8181 #include <asm/mmu.h>
8182+#include <asm/pgtable.h>
8183 #include <linux/smp.h>
8184
8185 static inline void fill_ldt(struct desc_struct *desc,
8186@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8187 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8188 desc->type = (info->read_exec_only ^ 1) << 1;
8189 desc->type |= info->contents << 2;
8190+ desc->type |= info->seg_not_present ^ 1;
8191 desc->s = 1;
8192 desc->dpl = 0x3;
8193 desc->p = info->seg_not_present ^ 1;
8194@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8195 }
8196
8197 extern struct desc_ptr idt_descr;
8198-extern gate_desc idt_table[];
8199-
8200-struct gdt_page {
8201- struct desc_struct gdt[GDT_ENTRIES];
8202-} __attribute__((aligned(PAGE_SIZE)));
8203-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8204+extern gate_desc idt_table[256];
8205
8206+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8207 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8208 {
8209- return per_cpu(gdt_page, cpu).gdt;
8210+ return cpu_gdt_table[cpu];
8211 }
8212
8213 #ifdef CONFIG_X86_64
8214@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8215 unsigned long base, unsigned dpl, unsigned flags,
8216 unsigned short seg)
8217 {
8218- gate->a = (seg << 16) | (base & 0xffff);
8219- gate->b = (base & 0xffff0000) |
8220- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8221+ gate->gate.offset_low = base;
8222+ gate->gate.seg = seg;
8223+ gate->gate.reserved = 0;
8224+ gate->gate.type = type;
8225+ gate->gate.s = 0;
8226+ gate->gate.dpl = dpl;
8227+ gate->gate.p = 1;
8228+ gate->gate.offset_high = base >> 16;
8229 }
8230
8231 #endif
8232@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8233 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8234 const gate_desc *gate)
8235 {
8236+ pax_open_kernel();
8237 memcpy(&idt[entry], gate, sizeof(*gate));
8238+ pax_close_kernel();
8239 }
8240
8241 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8242 const void *desc)
8243 {
8244+ pax_open_kernel();
8245 memcpy(&ldt[entry], desc, 8);
8246+ pax_close_kernel();
8247 }
8248
8249 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8250@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8251 size = sizeof(struct desc_struct);
8252 break;
8253 }
8254+
8255+ pax_open_kernel();
8256 memcpy(&gdt[entry], desc, size);
8257+ pax_close_kernel();
8258 }
8259
8260 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8261@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8262
8263 static inline void native_load_tr_desc(void)
8264 {
8265+ pax_open_kernel();
8266 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8267+ pax_close_kernel();
8268 }
8269
8270 static inline void native_load_gdt(const struct desc_ptr *dtr)
8271@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8272 unsigned int i;
8273 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8274
8275+ pax_open_kernel();
8276 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8277 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8278+ pax_close_kernel();
8279 }
8280
8281 #define _LDT_empty(info) \
8282@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8283 desc->limit = (limit >> 16) & 0xf;
8284 }
8285
8286-static inline void _set_gate(int gate, unsigned type, void *addr,
8287+static inline void _set_gate(int gate, unsigned type, const void *addr,
8288 unsigned dpl, unsigned ist, unsigned seg)
8289 {
8290 gate_desc s;
8291@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8292 * Pentium F0 0F bugfix can have resulted in the mapped
8293 * IDT being write-protected.
8294 */
8295-static inline void set_intr_gate(unsigned int n, void *addr)
8296+static inline void set_intr_gate(unsigned int n, const void *addr)
8297 {
8298 BUG_ON((unsigned)n > 0xFF);
8299 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8300@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8301 /*
8302 * This routine sets up an interrupt gate at directory privilege level 3.
8303 */
8304-static inline void set_system_intr_gate(unsigned int n, void *addr)
8305+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8306 {
8307 BUG_ON((unsigned)n > 0xFF);
8308 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8309 }
8310
8311-static inline void set_system_trap_gate(unsigned int n, void *addr)
8312+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8313 {
8314 BUG_ON((unsigned)n > 0xFF);
8315 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8316 }
8317
8318-static inline void set_trap_gate(unsigned int n, void *addr)
8319+static inline void set_trap_gate(unsigned int n, const void *addr)
8320 {
8321 BUG_ON((unsigned)n > 0xFF);
8322 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8323@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8324 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8325 {
8326 BUG_ON((unsigned)n > 0xFF);
8327- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8328+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8329 }
8330
8331-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8332+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8333 {
8334 BUG_ON((unsigned)n > 0xFF);
8335 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8336 }
8337
8338-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8339+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8343 }
8344
8345+#ifdef CONFIG_X86_32
8346+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8347+{
8348+ struct desc_struct d;
8349+
8350+ if (likely(limit))
8351+ limit = (limit - 1UL) >> PAGE_SHIFT;
8352+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8353+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8354+}
8355+#endif
8356+
8357 #endif /* _ASM_X86_DESC_H */
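pack_gate() in the hunk above replaces the open-coded a/b word arithmetic with the new gate bitfield view added to desc_defs.h. The sketch below packs a 32-bit gate descriptor both ways and checks that the results agree. Bitfield layout is compiler- and ABI-specific, so this only holds under the usual GCC/x86 little-endian rules the kernel relies on; the selector and base values are arbitrary examples.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
    struct {
        uint32_t a;
        uint32_t b;
    };
    struct {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
    } gate;
} gate_desc;

/* Old-style packing, as removed by the hunk. */
static void pack_gate_words(gate_desc *g, unsigned type, uint32_t base,
                            unsigned dpl, uint16_t seg)
{
    g->a = ((uint32_t)seg << 16) | (base & 0xffff);
    g->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

/* New-style packing via the bitfields. */
static void pack_gate_fields(gate_desc *g, unsigned type, uint32_t base,
                             unsigned dpl, uint16_t seg)
{
    g->gate.offset_low  = base;
    g->gate.seg         = seg;
    g->gate.reserved    = 0;
    g->gate.type        = type;
    g->gate.s           = 0;
    g->gate.dpl         = dpl;
    g->gate.p           = 1;
    g->gate.offset_high = base >> 16;
}

int main(void)
{
    gate_desc g_old, g_new;
    const unsigned GATE_INTERRUPT = 0xE;   /* 32-bit interrupt gate type */

    pack_gate_words(&g_old, GATE_INTERRUPT, 0xc01234abu, 0, 0x60);
    pack_gate_fields(&g_new, GATE_INTERRUPT, 0xc01234abu, 0, 0x60);

    assert(g_old.a == g_new.a && g_old.b == g_new.b);
    printf("a=%#x b=%#x\n", (unsigned)g_new.a, (unsigned)g_new.b);
    return 0;
}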
8358diff -urNp linux-2.6.32.42/arch/x86/include/asm/device.h linux-2.6.32.42/arch/x86/include/asm/device.h
8359--- linux-2.6.32.42/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8360+++ linux-2.6.32.42/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8361@@ -6,7 +6,7 @@ struct dev_archdata {
8362 void *acpi_handle;
8363 #endif
8364 #ifdef CONFIG_X86_64
8365-struct dma_map_ops *dma_ops;
8366+ const struct dma_map_ops *dma_ops;
8367 #endif
8368 #ifdef CONFIG_DMAR
8369 void *iommu; /* hook for IOMMU specific extension */
8370diff -urNp linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h
8371--- linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8372+++ linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8373@@ -25,9 +25,9 @@ extern int iommu_merge;
8374 extern struct device x86_dma_fallback_dev;
8375 extern int panic_on_overflow;
8376
8377-extern struct dma_map_ops *dma_ops;
8378+extern const struct dma_map_ops *dma_ops;
8379
8380-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8381+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8382 {
8383 #ifdef CONFIG_X86_32
8384 return dma_ops;
8385@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8386 /* Make sure we keep the same behaviour */
8387 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8388 {
8389- struct dma_map_ops *ops = get_dma_ops(dev);
8390+ const struct dma_map_ops *ops = get_dma_ops(dev);
8391 if (ops->mapping_error)
8392 return ops->mapping_error(dev, dma_addr);
8393
8394@@ -122,7 +122,7 @@ static inline void *
8395 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8396 gfp_t gfp)
8397 {
8398- struct dma_map_ops *ops = get_dma_ops(dev);
8399+ const struct dma_map_ops *ops = get_dma_ops(dev);
8400 void *memory;
8401
8402 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8403@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8404 static inline void dma_free_coherent(struct device *dev, size_t size,
8405 void *vaddr, dma_addr_t bus)
8406 {
8407- struct dma_map_ops *ops = get_dma_ops(dev);
8408+ const struct dma_map_ops *ops = get_dma_ops(dev);
8409
8410 WARN_ON(irqs_disabled()); /* for portability */
8411
8412diff -urNp linux-2.6.32.42/arch/x86/include/asm/e820.h linux-2.6.32.42/arch/x86/include/asm/e820.h
8413--- linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8414+++ linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8415@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8416 #define ISA_END_ADDRESS 0x100000
8417 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8418
8419-#define BIOS_BEGIN 0x000a0000
8420+#define BIOS_BEGIN 0x000c0000
8421 #define BIOS_END 0x00100000
8422
8423 #ifdef __KERNEL__
8424diff -urNp linux-2.6.32.42/arch/x86/include/asm/elf.h linux-2.6.32.42/arch/x86/include/asm/elf.h
8425--- linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8426+++ linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8427@@ -257,7 +257,25 @@ extern int force_personality32;
8428 the loader. We need to make sure that it is out of the way of the program
8429 that it will "exec", and that there is sufficient room for the brk. */
8430
8431+#ifdef CONFIG_PAX_SEGMEXEC
8432+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8433+#else
8434 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8435+#endif
8436+
8437+#ifdef CONFIG_PAX_ASLR
8438+#ifdef CONFIG_X86_32
8439+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8440+
8441+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8442+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8443+#else
8444+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8445+
8446+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8447+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8448+#endif
8449+#endif
8450
8451 /* This yields a mask that user programs can use to figure out what
8452 instruction set this CPU supports. This could be done in user space,
8453@@ -311,8 +329,7 @@ do { \
8454 #define ARCH_DLINFO \
8455 do { \
8456 if (vdso_enabled) \
8457- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8458- (unsigned long)current->mm->context.vdso); \
8459+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8460 } while (0)
8461
8462 #define AT_SYSINFO 32
8463@@ -323,7 +340,7 @@ do { \
8464
8465 #endif /* !CONFIG_X86_32 */
8466
8467-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8468+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8469
8470 #define VDSO_ENTRY \
8471 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8472@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8473 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8474 #define compat_arch_setup_additional_pages syscall32_setup_pages
8475
8476-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8477-#define arch_randomize_brk arch_randomize_brk
8478-
8479 #endif /* _ASM_X86_ELF_H */
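PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are counts of random bits. Elsewhere in the patch (outside this excerpt) PaX applies those bits as page-granular offsets, so, under that assumption, a delta of n bits covers a span of (1 << n) pages. The helper below just does that arithmetic; PAGE_SHIFT of 12 is assumed.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

/* Span of addresses an ASLR delta of 'bits' random bits can cover,
 * assuming the delta is applied in page-sized units. */
static unsigned long long aslr_span(unsigned bits)
{
    return (1ULL << bits) << PAGE_SHIFT;
}

int main(void)
{
    /* Bit counts taken from the x86 hunk above. */
    printf("32-bit mmap delta, 16 bits: %llu MiB\n", aslr_span(16) >> 20);
    printf("32-bit SEGMEXEC delta, 15 bits: %llu MiB\n", aslr_span(15) >> 20);
    return 0;
}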
8480diff -urNp linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h
8481--- linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8482+++ linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8483@@ -15,6 +15,6 @@ enum reboot_type {
8484
8485 extern enum reboot_type reboot_type;
8486
8487-extern void machine_emergency_restart(void);
8488+extern void machine_emergency_restart(void) __noreturn;
8489
8490 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8491diff -urNp linux-2.6.32.42/arch/x86/include/asm/futex.h linux-2.6.32.42/arch/x86/include/asm/futex.h
8492--- linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8493+++ linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8494@@ -12,16 +12,18 @@
8495 #include <asm/system.h>
8496
8497 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8498+ typecheck(u32 *, uaddr); \
8499 asm volatile("1:\t" insn "\n" \
8500 "2:\t.section .fixup,\"ax\"\n" \
8501 "3:\tmov\t%3, %1\n" \
8502 "\tjmp\t2b\n" \
8503 "\t.previous\n" \
8504 _ASM_EXTABLE(1b, 3b) \
8505- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8506+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8507 : "i" (-EFAULT), "0" (oparg), "1" (0))
8508
8509 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8510+ typecheck(u32 *, uaddr); \
8511 asm volatile("1:\tmovl %2, %0\n" \
8512 "\tmovl\t%0, %3\n" \
8513 "\t" insn "\n" \
8514@@ -34,10 +36,10 @@
8515 _ASM_EXTABLE(1b, 4b) \
8516 _ASM_EXTABLE(2b, 4b) \
8517 : "=&a" (oldval), "=&r" (ret), \
8518- "+m" (*uaddr), "=&r" (tem) \
8519+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8520 : "r" (oparg), "i" (-EFAULT), "1" (0))
8521
8522-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8523+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8524 {
8525 int op = (encoded_op >> 28) & 7;
8526 int cmp = (encoded_op >> 24) & 15;
8527@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8528
8529 switch (op) {
8530 case FUTEX_OP_SET:
8531- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8532+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8533 break;
8534 case FUTEX_OP_ADD:
8535- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8536+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8537 uaddr, oparg);
8538 break;
8539 case FUTEX_OP_OR:
8540@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8541 return ret;
8542 }
8543
8544-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8545+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8546 int newval)
8547 {
8548
8549@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8550 return -ENOSYS;
8551 #endif
8552
8553- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8554+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8555 return -EFAULT;
8556
8557- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8558+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8559 "2:\t.section .fixup, \"ax\"\n"
8560 "3:\tmov %2, %0\n"
8561 "\tjmp 2b\n"
8562 "\t.previous\n"
8563 _ASM_EXTABLE(1b, 3b)
8564- : "=a" (oldval), "+m" (*uaddr)
8565+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8566 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8567 : "memory"
8568 );
8569diff -urNp linux-2.6.32.42/arch/x86/include/asm/hw_irq.h linux-2.6.32.42/arch/x86/include/asm/hw_irq.h
8570--- linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8571+++ linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8572@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8573 extern void enable_IO_APIC(void);
8574
8575 /* Statistics */
8576-extern atomic_t irq_err_count;
8577-extern atomic_t irq_mis_count;
8578+extern atomic_unchecked_t irq_err_count;
8579+extern atomic_unchecked_t irq_mis_count;
8580
8581 /* EISA */
8582 extern void eisa_set_level_irq(unsigned int irq);
8583diff -urNp linux-2.6.32.42/arch/x86/include/asm/i387.h linux-2.6.32.42/arch/x86/include/asm/i387.h
8584--- linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8585+++ linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8586@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8587 {
8588 int err;
8589
8590+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8591+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8592+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8593+#endif
8594+
8595 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8596 "2:\n"
8597 ".section .fixup,\"ax\"\n"
8598@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8599 {
8600 int err;
8601
8602+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8603+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8604+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8605+#endif
8606+
8607 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8608 "2:\n"
8609 ".section .fixup,\"ax\"\n"
8610@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8611 }
8612
8613 /* We need a safe address that is cheap to find and that is already
8614- in L1 during context switch. The best choices are unfortunately
8615- different for UP and SMP */
8616-#ifdef CONFIG_SMP
8617-#define safe_address (__per_cpu_offset[0])
8618-#else
8619-#define safe_address (kstat_cpu(0).cpustat.user)
8620-#endif
8621+ in L1 during context switch. */
8622+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8623
8624 /*
8625 * These must be called with preempt disabled
8626@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8627 struct thread_info *me = current_thread_info();
8628 preempt_disable();
8629 if (me->status & TS_USEDFPU)
8630- __save_init_fpu(me->task);
8631+ __save_init_fpu(current);
8632 else
8633 clts();
8634 }
8635diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_32.h linux-2.6.32.42/arch/x86/include/asm/io_32.h
8636--- linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8637+++ linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8638@@ -3,6 +3,7 @@
8639
8640 #include <linux/string.h>
8641 #include <linux/compiler.h>
8642+#include <asm/processor.h>
8643
8644 /*
8645 * This file contains the definitions for the x86 IO instructions
8646@@ -42,6 +43,17 @@
8647
8648 #ifdef __KERNEL__
8649
8650+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8651+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8652+{
8653+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8654+}
8655+
8656+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8657+{
8658+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8659+}
8660+
8661 #include <asm-generic/iomap.h>
8662
8663 #include <linux/vmalloc.h>
8664diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_64.h linux-2.6.32.42/arch/x86/include/asm/io_64.h
8665--- linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8666+++ linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8667@@ -140,6 +140,17 @@ __OUTS(l)
8668
8669 #include <linux/vmalloc.h>
8670
8671+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8672+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8673+{
8674+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8675+}
8676+
8677+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8678+{
8679+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8680+}
8681+
8682 #include <asm-generic/iomap.h>
8683
8684 void __memcpy_fromio(void *, unsigned long, unsigned);
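Both io_32.h and io_64.h gain the same valid_phys_addr_range()/valid_mmap_phys_addr_range() checks: a byte range (or pfn range) is accepted only if, rounded up to whole pages, it stays below 1 << x86_phys_bits. A stand-alone rendering of the same arithmetic follows, with the CPU's physical address width passed in explicitly since boot_cpu_data is not available outside the kernel; it assumes a 64-bit host so unsigned long can hold a 36-bit address.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirror of the patched valid_phys_addr_range(): round the byte range up
 * to whole pages and require it to stay below 1 << phys_bits. */
static int valid_phys_addr_range(unsigned long addr, size_t count,
                                 unsigned phys_bits)
{
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
           (1ULL << (phys_bits - PAGE_SHIFT));
}

static int valid_mmap_phys_addr_range(unsigned long pfn, size_t count,
                                      unsigned phys_bits)
{
    return (pfn + (count >> PAGE_SHIFT)) <
           (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    unsigned phys_bits = 36;                        /* e.g. a PAE-capable CPU */
    unsigned long limit = 1UL << phys_bits;

    printf("%d\n", valid_phys_addr_range(0xf0000000UL, 4096, phys_bits));       /* 1 */
    printf("%d\n", valid_phys_addr_range(limit - PAGE_SIZE, PAGE_SIZE,
                                         phys_bits));  /* 0: reaches the limit */
    printf("%d\n", valid_mmap_phys_addr_range(1UL << 20, 1UL << 20,
                                              phys_bits));                      /* 1 */
    return 0;
}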
8685diff -urNp linux-2.6.32.42/arch/x86/include/asm/iommu.h linux-2.6.32.42/arch/x86/include/asm/iommu.h
8686--- linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8687+++ linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8688@@ -3,7 +3,7 @@
8689
8690 extern void pci_iommu_shutdown(void);
8691 extern void no_iommu_init(void);
8692-extern struct dma_map_ops nommu_dma_ops;
8693+extern const struct dma_map_ops nommu_dma_ops;
8694 extern int force_iommu, no_iommu;
8695 extern int iommu_detected;
8696 extern int iommu_pass_through;
8697diff -urNp linux-2.6.32.42/arch/x86/include/asm/irqflags.h linux-2.6.32.42/arch/x86/include/asm/irqflags.h
8698--- linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8699+++ linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8700@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8701 sti; \
8702 sysexit
8703
8704+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8705+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8706+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8707+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8708+
8709 #else
8710 #define INTERRUPT_RETURN iret
8711 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8712diff -urNp linux-2.6.32.42/arch/x86/include/asm/kprobes.h linux-2.6.32.42/arch/x86/include/asm/kprobes.h
8713--- linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8714+++ linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8715@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8716 #define BREAKPOINT_INSTRUCTION 0xcc
8717 #define RELATIVEJUMP_INSTRUCTION 0xe9
8718 #define MAX_INSN_SIZE 16
8719-#define MAX_STACK_SIZE 64
8720-#define MIN_STACK_SIZE(ADDR) \
8721- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8722- THREAD_SIZE - (unsigned long)(ADDR))) \
8723- ? (MAX_STACK_SIZE) \
8724- : (((unsigned long)current_thread_info()) + \
8725- THREAD_SIZE - (unsigned long)(ADDR)))
8726+#define MAX_STACK_SIZE 64UL
8727+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8728
8729 #define flush_insn_slot(p) do { } while (0)
8730
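The rewritten MIN_STACK_SIZE above clamps the number of stack bytes kprobes will copy to whatever is left between the probed address and the top of the kernel stack (thread.sp0), replacing the old thread_info-based expression. Stripped of kernel types, the clamp is just min(), as in this small sketch with a made-up stack-top value:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL

/* Bytes that may safely be copied starting at 'addr' when the stack grows
 * down from 'sp0' (addr is assumed to lie below sp0).  Mirrors the patched
 * MIN_STACK_SIZE() macro. */
static unsigned long min_stack_size(unsigned long addr, unsigned long sp0)
{
    unsigned long room = sp0 - addr;

    return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
    unsigned long sp0 = 0xc0000000UL;   /* hypothetical stack top */

    printf("%lu\n", min_stack_size(sp0 - 1024, sp0));  /* 64: plenty of room */
    printf("%lu\n", min_stack_size(sp0 - 16,   sp0));  /* 16: clamp to what's left */
    return 0;
}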
8731diff -urNp linux-2.6.32.42/arch/x86/include/asm/kvm_host.h linux-2.6.32.42/arch/x86/include/asm/kvm_host.h
8732--- linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8733+++ linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8734@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8735 const struct trace_print_flags *exit_reasons_str;
8736 };
8737
8738-extern struct kvm_x86_ops *kvm_x86_ops;
8739+extern const struct kvm_x86_ops *kvm_x86_ops;
8740
8741 int kvm_mmu_module_init(void);
8742 void kvm_mmu_module_exit(void);
8743diff -urNp linux-2.6.32.42/arch/x86/include/asm/local.h linux-2.6.32.42/arch/x86/include/asm/local.h
8744--- linux-2.6.32.42/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8745+++ linux-2.6.32.42/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8746@@ -18,26 +18,58 @@ typedef struct {
8747
8748 static inline void local_inc(local_t *l)
8749 {
8750- asm volatile(_ASM_INC "%0"
8751+ asm volatile(_ASM_INC "%0\n"
8752+
8753+#ifdef CONFIG_PAX_REFCOUNT
8754+ "jno 0f\n"
8755+ _ASM_DEC "%0\n"
8756+ "int $4\n0:\n"
8757+ _ASM_EXTABLE(0b, 0b)
8758+#endif
8759+
8760 : "+m" (l->a.counter));
8761 }
8762
8763 static inline void local_dec(local_t *l)
8764 {
8765- asm volatile(_ASM_DEC "%0"
8766+ asm volatile(_ASM_DEC "%0\n"
8767+
8768+#ifdef CONFIG_PAX_REFCOUNT
8769+ "jno 0f\n"
8770+ _ASM_INC "%0\n"
8771+ "int $4\n0:\n"
8772+ _ASM_EXTABLE(0b, 0b)
8773+#endif
8774+
8775 : "+m" (l->a.counter));
8776 }
8777
8778 static inline void local_add(long i, local_t *l)
8779 {
8780- asm volatile(_ASM_ADD "%1,%0"
8781+ asm volatile(_ASM_ADD "%1,%0\n"
8782+
8783+#ifdef CONFIG_PAX_REFCOUNT
8784+ "jno 0f\n"
8785+ _ASM_SUB "%1,%0\n"
8786+ "int $4\n0:\n"
8787+ _ASM_EXTABLE(0b, 0b)
8788+#endif
8789+
8790 : "+m" (l->a.counter)
8791 : "ir" (i));
8792 }
8793
8794 static inline void local_sub(long i, local_t *l)
8795 {
8796- asm volatile(_ASM_SUB "%1,%0"
8797+ asm volatile(_ASM_SUB "%1,%0\n"
8798+
8799+#ifdef CONFIG_PAX_REFCOUNT
8800+ "jno 0f\n"
8801+ _ASM_ADD "%1,%0\n"
8802+ "int $4\n0:\n"
8803+ _ASM_EXTABLE(0b, 0b)
8804+#endif
8805+
8806 : "+m" (l->a.counter)
8807 : "ir" (i));
8808 }
8809@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8810 {
8811 unsigned char c;
8812
8813- asm volatile(_ASM_SUB "%2,%0; sete %1"
8814+ asm volatile(_ASM_SUB "%2,%0\n"
8815+
8816+#ifdef CONFIG_PAX_REFCOUNT
8817+ "jno 0f\n"
8818+ _ASM_ADD "%2,%0\n"
8819+ "int $4\n0:\n"
8820+ _ASM_EXTABLE(0b, 0b)
8821+#endif
8822+
8823+ "sete %1\n"
8824 : "+m" (l->a.counter), "=qm" (c)
8825 : "ir" (i) : "memory");
8826 return c;
8827@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8828 {
8829 unsigned char c;
8830
8831- asm volatile(_ASM_DEC "%0; sete %1"
8832+ asm volatile(_ASM_DEC "%0\n"
8833+
8834+#ifdef CONFIG_PAX_REFCOUNT
8835+ "jno 0f\n"
8836+ _ASM_INC "%0\n"
8837+ "int $4\n0:\n"
8838+ _ASM_EXTABLE(0b, 0b)
8839+#endif
8840+
8841+ "sete %1\n"
8842 : "+m" (l->a.counter), "=qm" (c)
8843 : : "memory");
8844 return c != 0;
8845@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8846 {
8847 unsigned char c;
8848
8849- asm volatile(_ASM_INC "%0; sete %1"
8850+ asm volatile(_ASM_INC "%0\n"
8851+
8852+#ifdef CONFIG_PAX_REFCOUNT
8853+ "jno 0f\n"
8854+ _ASM_DEC "%0\n"
8855+ "int $4\n0:\n"
8856+ _ASM_EXTABLE(0b, 0b)
8857+#endif
8858+
8859+ "sete %1\n"
8860 : "+m" (l->a.counter), "=qm" (c)
8861 : : "memory");
8862 return c != 0;
8863@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8864 {
8865 unsigned char c;
8866
8867- asm volatile(_ASM_ADD "%2,%0; sets %1"
8868+ asm volatile(_ASM_ADD "%2,%0\n"
8869+
8870+#ifdef CONFIG_PAX_REFCOUNT
8871+ "jno 0f\n"
8872+ _ASM_SUB "%2,%0\n"
8873+ "int $4\n0:\n"
8874+ _ASM_EXTABLE(0b, 0b)
8875+#endif
8876+
8877+ "sets %1\n"
8878 : "+m" (l->a.counter), "=qm" (c)
8879 : "ir" (i) : "memory");
8880 return c;
8881@@ -133,7 +201,15 @@ static inline long local_add_return(long
8882 #endif
8883 /* Modern 486+ processor */
8884 __i = i;
8885- asm volatile(_ASM_XADD "%0, %1;"
8886+ asm volatile(_ASM_XADD "%0, %1\n"
8887+
8888+#ifdef CONFIG_PAX_REFCOUNT
8889+ "jno 0f\n"
8890+ _ASM_MOV "%0,%1\n"
8891+ "int $4\n0:\n"
8892+ _ASM_EXTABLE(0b, 0b)
8893+#endif
8894+
8895 : "+r" (i), "+m" (l->a.counter)
8896 : : "memory");
8897 return i + __i;
8898diff -urNp linux-2.6.32.42/arch/x86/include/asm/microcode.h linux-2.6.32.42/arch/x86/include/asm/microcode.h
8899--- linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8900+++ linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8901@@ -12,13 +12,13 @@ struct device;
8902 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8903
8904 struct microcode_ops {
8905- enum ucode_state (*request_microcode_user) (int cpu,
8906+ enum ucode_state (* const request_microcode_user) (int cpu,
8907 const void __user *buf, size_t size);
8908
8909- enum ucode_state (*request_microcode_fw) (int cpu,
8910+ enum ucode_state (* const request_microcode_fw) (int cpu,
8911 struct device *device);
8912
8913- void (*microcode_fini_cpu) (int cpu);
8914+ void (* const microcode_fini_cpu) (int cpu);
8915
8916 /*
8917 * The generic 'microcode_core' part guarantees that
8918@@ -38,18 +38,18 @@ struct ucode_cpu_info {
8919 extern struct ucode_cpu_info ucode_cpu_info[];
8920
8921 #ifdef CONFIG_MICROCODE_INTEL
8922-extern struct microcode_ops * __init init_intel_microcode(void);
8923+extern const struct microcode_ops * __init init_intel_microcode(void);
8924 #else
8925-static inline struct microcode_ops * __init init_intel_microcode(void)
8926+static inline const struct microcode_ops * __init init_intel_microcode(void)
8927 {
8928 return NULL;
8929 }
8930 #endif /* CONFIG_MICROCODE_INTEL */
8931
8932 #ifdef CONFIG_MICROCODE_AMD
8933-extern struct microcode_ops * __init init_amd_microcode(void);
8934+extern const struct microcode_ops * __init init_amd_microcode(void);
8935 #else
8936-static inline struct microcode_ops * __init init_amd_microcode(void)
8937+static inline const struct microcode_ops * __init init_amd_microcode(void)
8938 {
8939 return NULL;
8940 }
8941diff -urNp linux-2.6.32.42/arch/x86/include/asm/mman.h linux-2.6.32.42/arch/x86/include/asm/mman.h
8942--- linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8943+++ linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8944@@ -5,4 +5,14 @@
8945
8946 #include <asm-generic/mman.h>
8947
8948+#ifdef __KERNEL__
8949+#ifndef __ASSEMBLY__
8950+#ifdef CONFIG_X86_32
8951+#define arch_mmap_check i386_mmap_check
8952+int i386_mmap_check(unsigned long addr, unsigned long len,
8953+ unsigned long flags);
8954+#endif
8955+#endif
8956+#endif
8957+
8958 #endif /* _ASM_X86_MMAN_H */
8959diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu_context.h linux-2.6.32.42/arch/x86/include/asm/mmu_context.h
8960--- linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8961+++ linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8962@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8963
8964 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8965 {
8966+
8967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8968+ unsigned int i;
8969+ pgd_t *pgd;
8970+
8971+ pax_open_kernel();
8972+ pgd = get_cpu_pgd(smp_processor_id());
8973+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8974+ if (paravirt_enabled())
8975+ set_pgd(pgd+i, native_make_pgd(0));
8976+ else
8977+ pgd[i] = native_make_pgd(0);
8978+ pax_close_kernel();
8979+#endif
8980+
8981 #ifdef CONFIG_SMP
8982 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8983 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8984@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8985 struct task_struct *tsk)
8986 {
8987 unsigned cpu = smp_processor_id();
8988+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8989+ int tlbstate = TLBSTATE_OK;
8990+#endif
8991
8992 if (likely(prev != next)) {
8993 #ifdef CONFIG_SMP
8994+#ifdef CONFIG_X86_32
8995+ tlbstate = percpu_read(cpu_tlbstate.state);
8996+#endif
8997 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8998 percpu_write(cpu_tlbstate.active_mm, next);
8999 #endif
9000 cpumask_set_cpu(cpu, mm_cpumask(next));
9001
9002 /* Re-load page tables */
9003+#ifdef CONFIG_PAX_PER_CPU_PGD
9004+ pax_open_kernel();
9005+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9006+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9007+ pax_close_kernel();
9008+ load_cr3(get_cpu_pgd(cpu));
9009+#else
9010 load_cr3(next->pgd);
9011+#endif
9012
9013 /* stop flush ipis for the previous mm */
9014 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9015@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9016 */
9017 if (unlikely(prev->context.ldt != next->context.ldt))
9018 load_LDT_nolock(&next->context);
9019- }
9020+
9021+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9022+ if (!nx_enabled) {
9023+ smp_mb__before_clear_bit();
9024+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9025+ smp_mb__after_clear_bit();
9026+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9027+ }
9028+#endif
9029+
9030+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9031+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9032+ prev->context.user_cs_limit != next->context.user_cs_limit))
9033+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9034 #ifdef CONFIG_SMP
9035+ else if (unlikely(tlbstate != TLBSTATE_OK))
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+#endif
9039+
9040+ }
9041 else {
9042+
9043+#ifdef CONFIG_PAX_PER_CPU_PGD
9044+ pax_open_kernel();
9045+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9046+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9047+ pax_close_kernel();
9048+ load_cr3(get_cpu_pgd(cpu));
9049+#endif
9050+
9051+#ifdef CONFIG_SMP
9052 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9053 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9054
9055@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9056 * tlb flush IPI delivery. We must reload CR3
9057 * to make sure to use no freed page tables.
9058 */
9059+
9060+#ifndef CONFIG_PAX_PER_CPU_PGD
9061 load_cr3(next->pgd);
9062+#endif
9063+
9064 load_LDT_nolock(&next->context);
9065+
9066+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9067+ if (!nx_enabled)
9068+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9069+#endif
9070+
9071+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9072+#ifdef CONFIG_PAX_PAGEEXEC
9073+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9074+#endif
9075+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9076+#endif
9077+
9078 }
9079- }
9080 #endif
9081+ }
9082 }
9083
9084 #define activate_mm(prev, next) \
9085diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu.h linux-2.6.32.42/arch/x86/include/asm/mmu.h
9086--- linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9087+++ linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9088@@ -9,10 +9,23 @@
9089 * we put the segment information here.
9090 */
9091 typedef struct {
9092- void *ldt;
9093+ struct desc_struct *ldt;
9094 int size;
9095 struct mutex lock;
9096- void *vdso;
9097+ unsigned long vdso;
9098+
9099+#ifdef CONFIG_X86_32
9100+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9101+ unsigned long user_cs_base;
9102+ unsigned long user_cs_limit;
9103+
9104+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9105+ cpumask_t cpu_user_cs_mask;
9106+#endif
9107+
9108+#endif
9109+#endif
9110+
9111 } mm_context_t;
9112
9113 #ifdef CONFIG_SMP
9114diff -urNp linux-2.6.32.42/arch/x86/include/asm/module.h linux-2.6.32.42/arch/x86/include/asm/module.h
9115--- linux-2.6.32.42/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9116+++ linux-2.6.32.42/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9117@@ -5,6 +5,7 @@
9118
9119 #ifdef CONFIG_X86_64
9120 /* X86_64 does not define MODULE_PROC_FAMILY */
9121+#define MODULE_PROC_FAMILY ""
9122 #elif defined CONFIG_M386
9123 #define MODULE_PROC_FAMILY "386 "
9124 #elif defined CONFIG_M486
9125@@ -59,13 +60,36 @@
9126 #error unknown processor family
9127 #endif
9128
9129-#ifdef CONFIG_X86_32
9130-# ifdef CONFIG_4KSTACKS
9131-# define MODULE_STACKSIZE "4KSTACKS "
9132-# else
9133-# define MODULE_STACKSIZE ""
9134-# endif
9135-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9136+#ifdef CONFIG_PAX_MEMORY_UDEREF
9137+#define MODULE_PAX_UDEREF "UDEREF "
9138+#else
9139+#define MODULE_PAX_UDEREF ""
9140+#endif
9141+
9142+#ifdef CONFIG_PAX_KERNEXEC
9143+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9144+#else
9145+#define MODULE_PAX_KERNEXEC ""
9146+#endif
9147+
9148+#ifdef CONFIG_PAX_REFCOUNT
9149+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9150+#else
9151+#define MODULE_PAX_REFCOUNT ""
9152 #endif
9153
9154+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9155+#define MODULE_STACKSIZE "4KSTACKS "
9156+#else
9157+#define MODULE_STACKSIZE ""
9158+#endif
9159+
9160+#ifdef CONFIG_GRKERNSEC
9161+#define MODULE_GRSEC "GRSECURITY "
9162+#else
9163+#define MODULE_GRSEC ""
9164+#endif
9165+
9166+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9167+
9168 #endif /* _ASM_X86_MODULE_H */
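MODULE_ARCH_VERMAGIC in the hunk above is assembled purely by pasting adjacent string literals: every enabled feature contributes its own token ("GRSECURITY ", "KERNEXEC ", and so on) and every disabled one contributes an empty string. The same mechanism in miniature, with the Kconfig-driven fragments replaced by hard-coded stand-ins:

#include <stdio.h>

/* Stand-ins for the Kconfig-selected fragments in the hunk above. */
#define MODULE_PROC_FAMILY   ""            /* x86_64: empty family string */
#define MODULE_STACKSIZE     ""            /* no 4KSTACKS on 64-bit */
#define MODULE_GRSEC         "GRSECURITY " /* pretend CONFIG_GRKERNSEC=y */
#define MODULE_PAX_KERNEXEC  "KERNEXEC "   /* pretend CONFIG_PAX_KERNEXEC=y */
#define MODULE_PAX_UDEREF    ""            /* pretend CONFIG_PAX_MEMORY_UDEREF=n */
#define MODULE_PAX_REFCOUNT  "REFCOUNT "   /* pretend CONFIG_PAX_REFCOUNT=y */

/* Adjacent string literals are concatenated at translation time. */
#define MODULE_ARCH_VERMAGIC \
    MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
    MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
    /* Prints: "GRSECURITY KERNEXEC REFCOUNT " */
    printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);
    return 0;
}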
9169diff -urNp linux-2.6.32.42/arch/x86/include/asm/page_64_types.h linux-2.6.32.42/arch/x86/include/asm/page_64_types.h
9170--- linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9171+++ linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9172@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9173
9174 /* duplicated to the one in bootmem.h */
9175 extern unsigned long max_pfn;
9176-extern unsigned long phys_base;
9177+extern const unsigned long phys_base;
9178
9179 extern unsigned long __phys_addr(unsigned long);
9180 #define __phys_reloc_hide(x) (x)
9181diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt.h linux-2.6.32.42/arch/x86/include/asm/paravirt.h
9182--- linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9183+++ linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9184@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9185 pv_mmu_ops.set_fixmap(idx, phys, flags);
9186 }
9187
9188+#ifdef CONFIG_PAX_KERNEXEC
9189+static inline unsigned long pax_open_kernel(void)
9190+{
9191+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9192+}
9193+
9194+static inline unsigned long pax_close_kernel(void)
9195+{
9196+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9197+}
9198+#else
9199+static inline unsigned long pax_open_kernel(void) { return 0; }
9200+static inline unsigned long pax_close_kernel(void) { return 0; }
9201+#endif
9202+
9203 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9204
9205 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9206@@ -945,7 +960,7 @@ extern void default_banner(void);
9207
9208 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9209 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9210-#define PARA_INDIRECT(addr) *%cs:addr
9211+#define PARA_INDIRECT(addr) *%ss:addr
9212 #endif
9213
9214 #define INTERRUPT_RETURN \
9215@@ -1022,6 +1037,21 @@ extern void default_banner(void);
9216 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9217 CLBR_NONE, \
9218 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9219+
9220+#define GET_CR0_INTO_RDI \
9221+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9222+ mov %rax,%rdi
9223+
9224+#define SET_RDI_INTO_CR0 \
9225+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9226+
9227+#define GET_CR3_INTO_RDI \
9228+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9229+ mov %rax,%rdi
9230+
9231+#define SET_RDI_INTO_CR3 \
9232+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9233+
9234 #endif /* CONFIG_X86_32 */
9235
9236 #endif /* __ASSEMBLY__ */
9237diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h
9238--- linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9239+++ linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9240@@ -316,6 +316,12 @@ struct pv_mmu_ops {
9241 an mfn. We can tell which is which from the index. */
9242 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9243 phys_addr_t phys, pgprot_t flags);
9244+
9245+#ifdef CONFIG_PAX_KERNEXEC
9246+ unsigned long (*pax_open_kernel)(void);
9247+ unsigned long (*pax_close_kernel)(void);
9248+#endif
9249+
9250 };
9251
9252 struct raw_spinlock;
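paravirt_types.h grows two new pv_mmu_ops hooks here, and the paravirt.h hunk earlier wraps them as pax_open_kernel()/pax_close_kernel(), so the native CR0.WP toggling can be replaced per hypervisor. Structurally this is an ops table of function pointers. The sketch below shows only that shape, with a fake write-protect flag standing in for CR0.WP; nothing in it touches real control registers or the kernel's PVOP machinery.

#include <stdio.h>

/* Stand-in for the kernel's write-protect state (CR0.WP in the real code). */
static int fake_wp = 1;

struct pv_mmu_ops {
    /* The two hooks added by the hunk above. */
    unsigned long (*pax_open_kernel)(void);
    unsigned long (*pax_close_kernel)(void);
};

static unsigned long native_pax_open_kernel(void)
{
    fake_wp = 0;                    /* native code clears CR0.WP here */
    return 0;
}

static unsigned long native_pax_close_kernel(void)
{
    fake_wp = 1;                    /* and sets it again on the way out */
    return 0;
}

/* A paravirt backend could install different implementations here. */
static struct pv_mmu_ops pv_mmu_ops = {
    .pax_open_kernel  = native_pax_open_kernel,
    .pax_close_kernel = native_pax_close_kernel,
};

int main(void)
{
    printf("wp=%d\n", fake_wp);      /* 1: kernel text/rodata protected */
    pv_mmu_ops.pax_open_kernel();
    printf("wp=%d\n", fake_wp);      /* 0: writable window open */
    pv_mmu_ops.pax_close_kernel();
    printf("wp=%d\n", fake_wp);      /* 1: protection restored */
    return 0;
}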
9253diff -urNp linux-2.6.32.42/arch/x86/include/asm/pci_x86.h linux-2.6.32.42/arch/x86/include/asm/pci_x86.h
9254--- linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9255+++ linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9256@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9257 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9258
9259 struct pci_raw_ops {
9260- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9261+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9262 int reg, int len, u32 *val);
9263- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9264+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9265 int reg, int len, u32 val);
9266 };
9267
9268-extern struct pci_raw_ops *raw_pci_ops;
9269-extern struct pci_raw_ops *raw_pci_ext_ops;
9270+extern const struct pci_raw_ops *raw_pci_ops;
9271+extern const struct pci_raw_ops *raw_pci_ext_ops;
9272
9273-extern struct pci_raw_ops pci_direct_conf1;
9274+extern const struct pci_raw_ops pci_direct_conf1;
9275 extern bool port_cf9_safe;
9276
9277 /* arch_initcall level */
9278diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgalloc.h linux-2.6.32.42/arch/x86/include/asm/pgalloc.h
9279--- linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9280+++ linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9281@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9282 pmd_t *pmd, pte_t *pte)
9283 {
9284 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9285+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9286+}
9287+
9288+static inline void pmd_populate_user(struct mm_struct *mm,
9289+ pmd_t *pmd, pte_t *pte)
9290+{
9291+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9292 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9293 }
9294
9295diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h
9296--- linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9297+++ linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9298@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9299
9300 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9301 {
9302+ pax_open_kernel();
9303 *pmdp = pmd;
9304+ pax_close_kernel();
9305 }
9306
9307 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9308diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h
9309--- linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9310+++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9311@@ -26,9 +26,6 @@
9312 struct mm_struct;
9313 struct vm_area_struct;
9314
9315-extern pgd_t swapper_pg_dir[1024];
9316-extern pgd_t trampoline_pg_dir[1024];
9317-
9318 static inline void pgtable_cache_init(void) { }
9319 static inline void check_pgt_cache(void) { }
9320 void paging_init(void);
9321@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9322 # include <asm/pgtable-2level.h>
9323 #endif
9324
9325+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9326+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9327+#ifdef CONFIG_X86_PAE
9328+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9329+#endif
9330+
9331 #if defined(CONFIG_HIGHPTE)
9332 #define __KM_PTE \
9333 (in_nmi() ? KM_NMI_PTE : \
9334@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9335 /* Clear a kernel PTE and flush it from the TLB */
9336 #define kpte_clear_flush(ptep, vaddr) \
9337 do { \
9338+ pax_open_kernel(); \
9339 pte_clear(&init_mm, (vaddr), (ptep)); \
9340+ pax_close_kernel(); \
9341 __flush_tlb_one((vaddr)); \
9342 } while (0)
9343
9344@@ -85,6 +90,9 @@ do { \
9345
9346 #endif /* !__ASSEMBLY__ */
9347
9348+#define HAVE_ARCH_UNMAPPED_AREA
9349+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9350+
9351 /*
9352 * kern_addr_valid() is (1) for FLATMEM and (0) for
9353 * SPARSEMEM and DISCONTIGMEM
9354diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h
9355--- linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9356+++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9357@@ -8,7 +8,7 @@
9358 */
9359 #ifdef CONFIG_X86_PAE
9360 # include <asm/pgtable-3level_types.h>
9361-# define PMD_SIZE (1UL << PMD_SHIFT)
9362+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9363 # define PMD_MASK (~(PMD_SIZE - 1))
9364 #else
9365 # include <asm/pgtable-2level_types.h>
9366@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9367 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9368 #endif
9369
9370+#ifdef CONFIG_PAX_KERNEXEC
9371+#ifndef __ASSEMBLY__
9372+extern unsigned char MODULES_EXEC_VADDR[];
9373+extern unsigned char MODULES_EXEC_END[];
9374+#endif
9375+#include <asm/boot.h>
9376+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9377+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9378+#else
9379+#define ktla_ktva(addr) (addr)
9380+#define ktva_ktla(addr) (addr)
9381+#endif
9382+
9383 #define MODULES_VADDR VMALLOC_START
9384 #define MODULES_END VMALLOC_END
9385 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
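On 32-bit KERNEXEC kernels the ktla_ktva()/ktva_ktla() macros above translate between the kernel text's linear address and its virtual alias by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET; without KERNEXEC (and on 64-bit, per the later pgtable_64_types.h hunk) they are identity maps. A quick round-trip check, using placeholder constants rather than the real Kconfig/linker values:

#include <assert.h>
#include <stdio.h>

/* Placeholder values; the real ones come from Kconfig and the linker. */
#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL

/* Renderings of the 32-bit KERNEXEC variants from the hunk above. */
#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long text_addr = 0x00100000UL;   /* arbitrary example address */

    /* The two macros are inverses of each other. */
    assert(ktva_ktla(ktla_ktva(text_addr)) == text_addr);

    printf("ktla_ktva(%#lx) = %#lx\n", text_addr, ktla_ktva(text_addr));
    return 0;
}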
9386diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h
9387--- linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9388+++ linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9389@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9390
9391 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9392 {
9393+ pax_open_kernel();
9394 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9395+ pax_close_kernel();
9396 }
9397
9398 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9399 {
9400+ pax_open_kernel();
9401 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9402+ pax_close_kernel();
9403 }
9404
9405 /*
9406diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h
9407--- linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9408+++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9409@@ -16,10 +16,13 @@
9410
9411 extern pud_t level3_kernel_pgt[512];
9412 extern pud_t level3_ident_pgt[512];
9413+extern pud_t level3_vmalloc_pgt[512];
9414+extern pud_t level3_vmemmap_pgt[512];
9415+extern pud_t level2_vmemmap_pgt[512];
9416 extern pmd_t level2_kernel_pgt[512];
9417 extern pmd_t level2_fixmap_pgt[512];
9418-extern pmd_t level2_ident_pgt[512];
9419-extern pgd_t init_level4_pgt[];
9420+extern pmd_t level2_ident_pgt[512*2];
9421+extern pgd_t init_level4_pgt[512];
9422
9423 #define swapper_pg_dir init_level4_pgt
9424
9425@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9426
9427 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9428 {
9429+ pax_open_kernel();
9430 *pmdp = pmd;
9431+ pax_close_kernel();
9432 }
9433
9434 static inline void native_pmd_clear(pmd_t *pmd)
9435@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9436
9437 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9438 {
9439+ pax_open_kernel();
9440 *pgdp = pgd;
9441+ pax_close_kernel();
9442 }
9443
9444 static inline void native_pgd_clear(pgd_t *pgd)
9445diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h
9446--- linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9447+++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9448@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9449 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9450 #define MODULES_END _AC(0xffffffffff000000, UL)
9451 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9452+#define MODULES_EXEC_VADDR MODULES_VADDR
9453+#define MODULES_EXEC_END MODULES_END
9454+
9455+#define ktla_ktva(addr) (addr)
9456+#define ktva_ktla(addr) (addr)
9457
9458 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9459diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable.h linux-2.6.32.42/arch/x86/include/asm/pgtable.h
9460--- linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9461+++ linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9462@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9463
9464 #define arch_end_context_switch(prev) do {} while(0)
9465
9466+#define pax_open_kernel() native_pax_open_kernel()
9467+#define pax_close_kernel() native_pax_close_kernel()
9468 #endif /* CONFIG_PARAVIRT */
9469
9470+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9471+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9472+
9473+#ifdef CONFIG_PAX_KERNEXEC
9474+static inline unsigned long native_pax_open_kernel(void)
9475+{
9476+ unsigned long cr0;
9477+
9478+ preempt_disable();
9479+ barrier();
9480+ cr0 = read_cr0() ^ X86_CR0_WP;
9481+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9482+ write_cr0(cr0);
9483+ return cr0 ^ X86_CR0_WP;
9484+}
9485+
9486+static inline unsigned long native_pax_close_kernel(void)
9487+{
9488+ unsigned long cr0;
9489+
9490+ cr0 = read_cr0() ^ X86_CR0_WP;
9491+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9492+ write_cr0(cr0);
9493+ barrier();
9494+ preempt_enable_no_resched();
9495+ return cr0 ^ X86_CR0_WP;
9496+}
9497+#else
9498+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9499+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9500+#endif
9501+
9502 /*
9503 * The following only work if pte_present() is true.
9504 * Undefined behaviour if not..
9505 */
9506+static inline int pte_user(pte_t pte)
9507+{
9508+ return pte_val(pte) & _PAGE_USER;
9509+}
9510+
9511 static inline int pte_dirty(pte_t pte)
9512 {
9513 return pte_flags(pte) & _PAGE_DIRTY;
9514@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9515 return pte_clear_flags(pte, _PAGE_RW);
9516 }
9517
9518+static inline pte_t pte_mkread(pte_t pte)
9519+{
9520+ return __pte(pte_val(pte) | _PAGE_USER);
9521+}
9522+
9523 static inline pte_t pte_mkexec(pte_t pte)
9524 {
9525- return pte_clear_flags(pte, _PAGE_NX);
9526+#ifdef CONFIG_X86_PAE
9527+ if (__supported_pte_mask & _PAGE_NX)
9528+ return pte_clear_flags(pte, _PAGE_NX);
9529+ else
9530+#endif
9531+ return pte_set_flags(pte, _PAGE_USER);
9532+}
9533+
9534+static inline pte_t pte_exprotect(pte_t pte)
9535+{
9536+#ifdef CONFIG_X86_PAE
9537+ if (__supported_pte_mask & _PAGE_NX)
9538+ return pte_set_flags(pte, _PAGE_NX);
9539+ else
9540+#endif
9541+ return pte_clear_flags(pte, _PAGE_USER);
9542 }
9543
9544 static inline pte_t pte_mkdirty(pte_t pte)
9545@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9546 #endif
9547
9548 #ifndef __ASSEMBLY__
9549+
9550+#ifdef CONFIG_PAX_PER_CPU_PGD
9551+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9552+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9553+{
9554+ return cpu_pgd[cpu];
9555+}
9556+#endif
9557+
9558 #include <linux/mm_types.h>
9559
9560 static inline int pte_none(pte_t pte)
9561@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9562
9563 static inline int pgd_bad(pgd_t pgd)
9564 {
9565- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9566+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9567 }
9568
9569 static inline int pgd_none(pgd_t pgd)
9570@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9571 * pgd_offset() returns a (pgd_t *)
9572 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9573 */
9574-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9575+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9576+
9577+#ifdef CONFIG_PAX_PER_CPU_PGD
9578+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9579+#endif
9580+
9581 /*
9582 * a shortcut which implies the use of the kernel's pgd, instead
9583 * of a process's
9584@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9585 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9586 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9587
9588+#ifdef CONFIG_X86_32
9589+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9590+#else
9591+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9592+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9593+
9594+#ifdef CONFIG_PAX_MEMORY_UDEREF
9595+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9596+#else
9597+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9598+#endif
9599+
9600+#endif
9601+
9602 #ifndef __ASSEMBLY__
9603
9604 extern int direct_gbpages;
9605@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9606 * dst and src can be on the same page, but the range must not overlap,
9607 * and must not cross a page boundary.
9608 */
9609-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9610+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9611 {
9612- memcpy(dst, src, count * sizeof(pgd_t));
9613+ pax_open_kernel();
9614+ while (count--)
9615+ *dst++ = *src++;
9616+ pax_close_kernel();
9617 }
9618
9619+#ifdef CONFIG_PAX_PER_CPU_PGD
9620+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9621+#endif
9622+
9623+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9624+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9625+#else
9626+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9627+#endif
9628
9629 #include <asm-generic/pgtable.h>
9630 #endif /* __ASSEMBLY__ */
9631diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h
9632--- linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9633+++ linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9634@@ -16,12 +16,11 @@
9635 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9636 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9637 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9638-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9639+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9640 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9641 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9642 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9643-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9644-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9645+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9646 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9647
9648 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9649@@ -39,7 +38,6 @@
9650 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9651 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9652 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9653-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9654 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9655 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9656 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9657@@ -55,8 +53,10 @@
9658
9659 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9660 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9661-#else
9662+#elif defined(CONFIG_KMEMCHECK)
9663 #define _PAGE_NX (_AT(pteval_t, 0))
9664+#else
9665+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9666 #endif
9667
9668 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9669@@ -93,6 +93,9 @@
9670 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9671 _PAGE_ACCESSED)
9672
9673+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9674+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9675+
9676 #define __PAGE_KERNEL_EXEC \
9677 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9678 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9679@@ -103,8 +106,8 @@
9680 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9681 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9682 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9683-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9684-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9685+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9686+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9687 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9688 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9689 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9690@@ -163,8 +166,8 @@
9691 * bits are combined, this will alow user to access the high address mapped
9692 * VDSO in the presence of CONFIG_COMPAT_VDSO
9693 */
9694-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9695-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9696+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9697+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9698 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9699 #endif
9700
9701@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9702 {
9703 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9704 }
9705+#endif
9706
9707+#if PAGETABLE_LEVELS == 3
9708+#include <asm-generic/pgtable-nopud.h>
9709+#endif
9710+
9711+#if PAGETABLE_LEVELS == 2
9712+#include <asm-generic/pgtable-nopmd.h>
9713+#endif
9714+
9715+#ifndef __ASSEMBLY__
9716 #if PAGETABLE_LEVELS > 3
9717 typedef struct { pudval_t pud; } pud_t;
9718
9719@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9720 return pud.pud;
9721 }
9722 #else
9723-#include <asm-generic/pgtable-nopud.h>
9724-
9725 static inline pudval_t native_pud_val(pud_t pud)
9726 {
9727 return native_pgd_val(pud.pgd);
9728@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9729 return pmd.pmd;
9730 }
9731 #else
9732-#include <asm-generic/pgtable-nopmd.h>
9733-
9734 static inline pmdval_t native_pmd_val(pmd_t pmd)
9735 {
9736 return native_pgd_val(pmd.pud.pgd);
9737@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9738
9739 extern pteval_t __supported_pte_mask;
9740 extern void set_nx(void);
9741+
9742+#ifdef CONFIG_X86_32
9743+#ifdef CONFIG_X86_PAE
9744 extern int nx_enabled;
9745+#else
9746+#define nx_enabled (0)
9747+#endif
9748+#else
9749+#define nx_enabled (1)
9750+#endif
9751
9752 #define pgprot_writecombine pgprot_writecombine
9753 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9754diff -urNp linux-2.6.32.42/arch/x86/include/asm/processor.h linux-2.6.32.42/arch/x86/include/asm/processor.h
9755--- linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9756+++ linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9757@@ -272,7 +272,7 @@ struct tss_struct {
9758
9759 } ____cacheline_aligned;
9760
9761-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9762+extern struct tss_struct init_tss[NR_CPUS];
9763
9764 /*
9765 * Save the original ist values for checking stack pointers during debugging
9766@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9767 */
9768 #define TASK_SIZE PAGE_OFFSET
9769 #define TASK_SIZE_MAX TASK_SIZE
9770+
9771+#ifdef CONFIG_PAX_SEGMEXEC
9772+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9773+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9774+#else
9775 #define STACK_TOP TASK_SIZE
9776-#define STACK_TOP_MAX STACK_TOP
9777+#endif
9778+
9779+#define STACK_TOP_MAX TASK_SIZE
9780
9781 #define INIT_THREAD { \
9782- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9783+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9784 .vm86_info = NULL, \
9785 .sysenter_cs = __KERNEL_CS, \
9786 .io_bitmap_ptr = NULL, \
9787@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9788 */
9789 #define INIT_TSS { \
9790 .x86_tss = { \
9791- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9792+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9793 .ss0 = __KERNEL_DS, \
9794 .ss1 = __KERNEL_CS, \
9795 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9796@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9797 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9798
9799 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9800-#define KSTK_TOP(info) \
9801-({ \
9802- unsigned long *__ptr = (unsigned long *)(info); \
9803- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9804-})
9805+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9806
9807 /*
9808 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9809@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9810 #define task_pt_regs(task) \
9811 ({ \
9812 struct pt_regs *__regs__; \
9813- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9814+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9815 __regs__ - 1; \
9816 })
9817
9818@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9819 /*
9820 * User space process size. 47bits minus one guard page.
9821 */
9822-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9823+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9824
9825 /* This decides where the kernel will search for a free chunk of vm
9826 * space during mmap's.
9827 */
9828 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9829- 0xc0000000 : 0xFFFFe000)
9830+ 0xc0000000 : 0xFFFFf000)
9831
9832 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9833 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9834@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9835 #define STACK_TOP_MAX TASK_SIZE_MAX
9836
9837 #define INIT_THREAD { \
9838- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9839+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9840 }
9841
9842 #define INIT_TSS { \
9843- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9844+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9845 }
9846
9847 /*
9848@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9849 */
9850 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9851
9852+#ifdef CONFIG_PAX_SEGMEXEC
9853+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9854+#endif
9855+
9856 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9857
9858 /* Get/set a process' ability to use the timestamp counter instruction */
9859diff -urNp linux-2.6.32.42/arch/x86/include/asm/ptrace.h linux-2.6.32.42/arch/x86/include/asm/ptrace.h
9860--- linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9861+++ linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9862@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9863 }
9864
9865 /*
9866- * user_mode_vm(regs) determines whether a register set came from user mode.
9867+ * user_mode(regs) determines whether a register set came from user mode.
9868 * This is true if V8086 mode was enabled OR if the register set was from
9869 * protected mode with RPL-3 CS value. This tricky test checks that with
9870 * one comparison. Many places in the kernel can bypass this full check
9871- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9872+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9873+ * be used.
9874 */
9875-static inline int user_mode(struct pt_regs *regs)
9876+static inline int user_mode_novm(struct pt_regs *regs)
9877 {
9878 #ifdef CONFIG_X86_32
9879 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9880 #else
9881- return !!(regs->cs & 3);
9882+ return !!(regs->cs & SEGMENT_RPL_MASK);
9883 #endif
9884 }
9885
9886-static inline int user_mode_vm(struct pt_regs *regs)
9887+static inline int user_mode(struct pt_regs *regs)
9888 {
9889 #ifdef CONFIG_X86_32
9890 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9891 USER_RPL;
9892 #else
9893- return user_mode(regs);
9894+ return user_mode_novm(regs);
9895 #endif
9896 }
9897
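After the rename above, user_mode(regs) is the full check (RPL-3 code segment or vm86) and user_mode_novm(regs) is the cheaper variant for callers that have already ruled out vm86. For context, a typical caller looks like the following sketch (a hypothetical handler, not taken from this patch):

	/* sketch: trap handler branching on where the faulting context came from */
	static void example_trap(struct pt_regs *regs, long error_code)
	{
		if (user_mode(regs)) {
			/* fault originated in ring 3 (or vm86): signal the task */
			force_sig(SIGSEGV, current);
			return;
		}
		/* kernel-mode fault: treat as a bug */
		die("example trap", regs, error_code);
	}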
9898diff -urNp linux-2.6.32.42/arch/x86/include/asm/reboot.h linux-2.6.32.42/arch/x86/include/asm/reboot.h
9899--- linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9900+++ linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9901@@ -6,19 +6,19 @@
9902 struct pt_regs;
9903
9904 struct machine_ops {
9905- void (*restart)(char *cmd);
9906- void (*halt)(void);
9907- void (*power_off)(void);
9908+ void (* __noreturn restart)(char *cmd);
9909+ void (* __noreturn halt)(void);
9910+ void (* __noreturn power_off)(void);
9911 void (*shutdown)(void);
9912 void (*crash_shutdown)(struct pt_regs *);
9913- void (*emergency_restart)(void);
9914+ void (* __noreturn emergency_restart)(void);
9915 };
9916
9917 extern struct machine_ops machine_ops;
9918
9919 void native_machine_crash_shutdown(struct pt_regs *regs);
9920 void native_machine_shutdown(void);
9921-void machine_real_restart(const unsigned char *code, int length);
9922+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9923
9924 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9925 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9926diff -urNp linux-2.6.32.42/arch/x86/include/asm/rwsem.h linux-2.6.32.42/arch/x86/include/asm/rwsem.h
9927--- linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9928+++ linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9929@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9930 {
9931 asm volatile("# beginning down_read\n\t"
9932 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9933+
9934+#ifdef CONFIG_PAX_REFCOUNT
9935+ "jno 0f\n"
9936+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9937+ "int $4\n0:\n"
9938+ _ASM_EXTABLE(0b, 0b)
9939+#endif
9940+
9941 /* adds 0x00000001, returns the old value */
9942 " jns 1f\n"
9943 " call call_rwsem_down_read_failed\n"
9944@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9945 "1:\n\t"
9946 " mov %1,%2\n\t"
9947 " add %3,%2\n\t"
9948+
9949+#ifdef CONFIG_PAX_REFCOUNT
9950+ "jno 0f\n"
9951+ "sub %3,%2\n"
9952+ "int $4\n0:\n"
9953+ _ASM_EXTABLE(0b, 0b)
9954+#endif
9955+
9956 " jle 2f\n\t"
9957 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9958 " jnz 1b\n\t"
9959@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9960 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9961 asm volatile("# beginning down_write\n\t"
9962 LOCK_PREFIX " xadd %1,(%2)\n\t"
9963+
9964+#ifdef CONFIG_PAX_REFCOUNT
9965+ "jno 0f\n"
9966+ "mov %1,(%2)\n"
9967+ "int $4\n0:\n"
9968+ _ASM_EXTABLE(0b, 0b)
9969+#endif
9970+
9971 /* subtract 0x0000ffff, returns the old value */
9972 " test %1,%1\n\t"
9973 /* was the count 0 before? */
9974@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9975 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9976 asm volatile("# beginning __up_read\n\t"
9977 LOCK_PREFIX " xadd %1,(%2)\n\t"
9978+
9979+#ifdef CONFIG_PAX_REFCOUNT
9980+ "jno 0f\n"
9981+ "mov %1,(%2)\n"
9982+ "int $4\n0:\n"
9983+ _ASM_EXTABLE(0b, 0b)
9984+#endif
9985+
9986 /* subtracts 1, returns the old value */
9987 " jns 1f\n\t"
9988 " call call_rwsem_wake\n"
9989@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9990 rwsem_count_t tmp;
9991 asm volatile("# beginning __up_write\n\t"
9992 LOCK_PREFIX " xadd %1,(%2)\n\t"
9993+
9994+#ifdef CONFIG_PAX_REFCOUNT
9995+ "jno 0f\n"
9996+ "mov %1,(%2)\n"
9997+ "int $4\n0:\n"
9998+ _ASM_EXTABLE(0b, 0b)
9999+#endif
10000+
10001 /* tries to transition
10002 0xffff0001 -> 0x00000000 */
10003 " jz 1f\n"
10004@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10005 {
10006 asm volatile("# beginning __downgrade_write\n\t"
10007 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10008+
10009+#ifdef CONFIG_PAX_REFCOUNT
10010+ "jno 0f\n"
10011+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10012+ "int $4\n0:\n"
10013+ _ASM_EXTABLE(0b, 0b)
10014+#endif
10015+
10016 /*
10017 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10018 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10019@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10020 static inline void rwsem_atomic_add(rwsem_count_t delta,
10021 struct rw_semaphore *sem)
10022 {
10023- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10024+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10025+
10026+#ifdef CONFIG_PAX_REFCOUNT
10027+ "jno 0f\n"
10028+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10029+ "int $4\n0:\n"
10030+ _ASM_EXTABLE(0b, 0b)
10031+#endif
10032+
10033 : "+m" (sem->count)
10034 : "er" (delta));
10035 }
10036@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10037 {
10038 rwsem_count_t tmp = delta;
10039
10040- asm volatile(LOCK_PREFIX "xadd %0,%1"
10041+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10042+
10043+#ifdef CONFIG_PAX_REFCOUNT
10044+ "jno 0f\n"
10045+ "mov %0,%1\n"
10046+ "int $4\n0:\n"
10047+ _ASM_EXTABLE(0b, 0b)
10048+#endif
10049+
10050 : "+r" (tmp), "+m" (sem->count)
10051 : : "memory");
10052
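The PAX_REFCOUNT blocks above all follow one idiom: perform the locked arithmetic, and if the signed result overflowed (the jno branch is not taken), undo the operation and execute int $4 to raise the overflow exception; the accompanying exception-table entry lets execution resume at the 0: label once the trap has been handled. The same idiom written out for a plain atomic counter (this helper is a sketch, not part of the patch, assuming the kernel's LOCK_PREFIX and _ASM_EXTABLE macros):

	/* sketch: overflow-checked atomic increment using the pattern above */
	static inline void atomic_inc_checked(atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "incl %0\n"

	#ifdef CONFIG_PAX_REFCOUNT
			     "jno 0f\n"			/* no signed overflow: done */
			     LOCK_PREFIX "decl %0\n"	/* overflow: undo the increment */
			     "int $4\n0:\n"		/* raise #OF for the overflow handler */
			     _ASM_EXTABLE(0b, 0b)
	#endif

			     : "+m" (v->counter));
	}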
10053diff -urNp linux-2.6.32.42/arch/x86/include/asm/segment.h linux-2.6.32.42/arch/x86/include/asm/segment.h
10054--- linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10055+++ linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10056@@ -62,8 +62,8 @@
10057 * 26 - ESPFIX small SS
10058 * 27 - per-cpu [ offset to per-cpu data area ]
10059 * 28 - stack_canary-20 [ for stack protector ]
10060- * 29 - unused
10061- * 30 - unused
10062+ * 29 - PCI BIOS CS
10063+ * 30 - PCI BIOS DS
10064 * 31 - TSS for double fault handler
10065 */
10066 #define GDT_ENTRY_TLS_MIN 6
10067@@ -77,6 +77,8 @@
10068
10069 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10070
10071+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10072+
10073 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10074
10075 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10076@@ -88,7 +90,7 @@
10077 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10078 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10079
10080-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10081+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10082 #ifdef CONFIG_SMP
10083 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10084 #else
10085@@ -102,6 +104,12 @@
10086 #define __KERNEL_STACK_CANARY 0
10087 #endif
10088
10089+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10090+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10091+
10092+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10093+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10094+
10095 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10096
10097 /*
10098@@ -139,7 +147,7 @@
10099 */
10100
10101 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10102-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10103+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10104
10105
10106 #else
10107@@ -163,6 +171,8 @@
10108 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10109 #define __USER32_DS __USER_DS
10110
10111+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10112+
10113 #define GDT_ENTRY_TSS 8 /* needs two entries */
10114 #define GDT_ENTRY_LDT 10 /* needs two entries */
10115 #define GDT_ENTRY_TLS_MIN 12
10116@@ -183,6 +193,7 @@
10117 #endif
10118
10119 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10120+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10121 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10122 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10123 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10124diff -urNp linux-2.6.32.42/arch/x86/include/asm/smp.h linux-2.6.32.42/arch/x86/include/asm/smp.h
10125--- linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10126+++ linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-07-01 19:00:40.000000000 -0400
10127@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10128 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10129 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10130 DECLARE_PER_CPU(u16, cpu_llc_id);
10131-DECLARE_PER_CPU(int, cpu_number);
10132+DECLARE_PER_CPU(unsigned int, cpu_number);
10133
10134 static inline struct cpumask *cpu_sibling_mask(int cpu)
10135 {
10136@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10137 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10138
10139 /* Static state in head.S used to set up a CPU */
10140-extern struct {
10141- void *sp;
10142- unsigned short ss;
10143-} stack_start;
10144+extern unsigned long stack_start; /* Initial stack pointer address */
10145
10146 struct smp_ops {
10147 void (*smp_prepare_boot_cpu)(void);
10148@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10149 extern int safe_smp_processor_id(void);
10150
10151 #elif defined(CONFIG_X86_64_SMP)
10152-#define raw_smp_processor_id() (percpu_read(cpu_number))
10153-
10154-#define stack_smp_processor_id() \
10155-({ \
10156- struct thread_info *ti; \
10157- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10158- ti->cpu; \
10159-})
10160+#define raw_smp_processor_id() (percpu_read(cpu_number))
10161+#define stack_smp_processor_id() raw_smp_processor_id()
10162 #define safe_smp_processor_id() smp_processor_id()
10163
10164 #endif
10165diff -urNp linux-2.6.32.42/arch/x86/include/asm/spinlock.h linux-2.6.32.42/arch/x86/include/asm/spinlock.h
10166--- linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10167+++ linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10168@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10169 static inline void __raw_read_lock(raw_rwlock_t *rw)
10170 {
10171 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10172+
10173+#ifdef CONFIG_PAX_REFCOUNT
10174+ "jno 0f\n"
10175+ LOCK_PREFIX " addl $1,(%0)\n"
10176+ "int $4\n0:\n"
10177+ _ASM_EXTABLE(0b, 0b)
10178+#endif
10179+
10180 "jns 1f\n"
10181 "call __read_lock_failed\n\t"
10182 "1:\n"
10183@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10184 static inline void __raw_write_lock(raw_rwlock_t *rw)
10185 {
10186 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10187+
10188+#ifdef CONFIG_PAX_REFCOUNT
10189+ "jno 0f\n"
10190+ LOCK_PREFIX " addl %1,(%0)\n"
10191+ "int $4\n0:\n"
10192+ _ASM_EXTABLE(0b, 0b)
10193+#endif
10194+
10195 "jz 1f\n"
10196 "call __write_lock_failed\n\t"
10197 "1:\n"
10198@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10199
10200 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10201 {
10202- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10203+ asm volatile(LOCK_PREFIX "incl %0\n"
10204+
10205+#ifdef CONFIG_PAX_REFCOUNT
10206+ "jno 0f\n"
10207+ LOCK_PREFIX "decl %0\n"
10208+ "int $4\n0:\n"
10209+ _ASM_EXTABLE(0b, 0b)
10210+#endif
10211+
10212+ :"+m" (rw->lock) : : "memory");
10213 }
10214
10215 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10216 {
10217- asm volatile(LOCK_PREFIX "addl %1, %0"
10218+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10219+
10220+#ifdef CONFIG_PAX_REFCOUNT
10221+ "jno 0f\n"
10222+ LOCK_PREFIX "subl %1, %0\n"
10223+ "int $4\n0:\n"
10224+ _ASM_EXTABLE(0b, 0b)
10225+#endif
10226+
10227 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10228 }
10229
10230diff -urNp linux-2.6.32.42/arch/x86/include/asm/stackprotector.h linux-2.6.32.42/arch/x86/include/asm/stackprotector.h
10231--- linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10232+++ linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10233@@ -48,7 +48,7 @@
10234 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10235 */
10236 #define GDT_STACK_CANARY_INIT \
10237- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10238+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10239
10240 /*
10241 * Initialize the stackprotector canary value.
10242@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10243
10244 static inline void load_stack_canary_segment(void)
10245 {
10246-#ifdef CONFIG_X86_32
10247+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10248 asm volatile ("mov %0, %%gs" : : "r" (0));
10249 #endif
10250 }
10251diff -urNp linux-2.6.32.42/arch/x86/include/asm/system.h linux-2.6.32.42/arch/x86/include/asm/system.h
10252--- linux-2.6.32.42/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10253+++ linux-2.6.32.42/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10254@@ -132,7 +132,7 @@ do { \
10255 "thread_return:\n\t" \
10256 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10257 __switch_canary \
10258- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10259+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10260 "movq %%rax,%%rdi\n\t" \
10261 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10262 "jnz ret_from_fork\n\t" \
10263@@ -143,7 +143,7 @@ do { \
10264 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10265 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10266 [_tif_fork] "i" (_TIF_FORK), \
10267- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10268+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10269 [current_task] "m" (per_cpu_var(current_task)) \
10270 __switch_canary_iparam \
10271 : "memory", "cc" __EXTRA_CLOBBER)
10272@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10273 {
10274 unsigned long __limit;
10275 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10276- return __limit + 1;
10277+ return __limit;
10278 }
10279
10280 static inline void native_clts(void)
10281@@ -340,12 +340,12 @@ void enable_hlt(void);
10282
10283 void cpu_idle_wait(void);
10284
10285-extern unsigned long arch_align_stack(unsigned long sp);
10286+#define arch_align_stack(x) ((x) & ~0xfUL)
10287 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10288
10289 void default_idle(void);
10290
10291-void stop_this_cpu(void *dummy);
10292+void stop_this_cpu(void *dummy) __noreturn;
10293
10294 /*
10295 * Force strict CPU ordering.
10296diff -urNp linux-2.6.32.42/arch/x86/include/asm/thread_info.h linux-2.6.32.42/arch/x86/include/asm/thread_info.h
10297--- linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10298+++ linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10299@@ -10,6 +10,7 @@
10300 #include <linux/compiler.h>
10301 #include <asm/page.h>
10302 #include <asm/types.h>
10303+#include <asm/percpu.h>
10304
10305 /*
10306 * low level task data that entry.S needs immediate access to
10307@@ -24,7 +25,6 @@ struct exec_domain;
10308 #include <asm/atomic.h>
10309
10310 struct thread_info {
10311- struct task_struct *task; /* main task structure */
10312 struct exec_domain *exec_domain; /* execution domain */
10313 __u32 flags; /* low level flags */
10314 __u32 status; /* thread synchronous flags */
10315@@ -34,18 +34,12 @@ struct thread_info {
10316 mm_segment_t addr_limit;
10317 struct restart_block restart_block;
10318 void __user *sysenter_return;
10319-#ifdef CONFIG_X86_32
10320- unsigned long previous_esp; /* ESP of the previous stack in
10321- case of nested (IRQ) stacks
10322- */
10323- __u8 supervisor_stack[0];
10324-#endif
10325+ unsigned long lowest_stack;
10326 int uaccess_err;
10327 };
10328
10329-#define INIT_THREAD_INFO(tsk) \
10330+#define INIT_THREAD_INFO \
10331 { \
10332- .task = &tsk, \
10333 .exec_domain = &default_exec_domain, \
10334 .flags = 0, \
10335 .cpu = 0, \
10336@@ -56,7 +50,7 @@ struct thread_info {
10337 }, \
10338 }
10339
10340-#define init_thread_info (init_thread_union.thread_info)
10341+#define init_thread_info (init_thread_union.stack)
10342 #define init_stack (init_thread_union.stack)
10343
10344 #else /* !__ASSEMBLY__ */
10345@@ -163,6 +157,23 @@ struct thread_info {
10346 #define alloc_thread_info(tsk) \
10347 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10348
10349+#ifdef __ASSEMBLY__
10350+/* how to get the thread information struct from ASM */
10351+#define GET_THREAD_INFO(reg) \
10352+ mov PER_CPU_VAR(current_tinfo), reg
10353+
10354+/* use this one if reg already contains %esp */
10355+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10356+#else
10357+/* how to get the thread information struct from C */
10358+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10359+
10360+static __always_inline struct thread_info *current_thread_info(void)
10361+{
10362+ return percpu_read_stable(current_tinfo);
10363+}
10364+#endif
10365+
10366 #ifdef CONFIG_X86_32
10367
10368 #define STACK_WARN (THREAD_SIZE/8)
10369@@ -173,35 +184,13 @@ struct thread_info {
10370 */
10371 #ifndef __ASSEMBLY__
10372
10373-
10374 /* how to get the current stack pointer from C */
10375 register unsigned long current_stack_pointer asm("esp") __used;
10376
10377-/* how to get the thread information struct from C */
10378-static inline struct thread_info *current_thread_info(void)
10379-{
10380- return (struct thread_info *)
10381- (current_stack_pointer & ~(THREAD_SIZE - 1));
10382-}
10383-
10384-#else /* !__ASSEMBLY__ */
10385-
10386-/* how to get the thread information struct from ASM */
10387-#define GET_THREAD_INFO(reg) \
10388- movl $-THREAD_SIZE, reg; \
10389- andl %esp, reg
10390-
10391-/* use this one if reg already contains %esp */
10392-#define GET_THREAD_INFO_WITH_ESP(reg) \
10393- andl $-THREAD_SIZE, reg
10394-
10395 #endif
10396
10397 #else /* X86_32 */
10398
10399-#include <asm/percpu.h>
10400-#define KERNEL_STACK_OFFSET (5*8)
10401-
10402 /*
10403 * macros/functions for gaining access to the thread information structure
10404 * preempt_count needs to be 1 initially, until the scheduler is functional.
10405@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10406 #ifndef __ASSEMBLY__
10407 DECLARE_PER_CPU(unsigned long, kernel_stack);
10408
10409-static inline struct thread_info *current_thread_info(void)
10410-{
10411- struct thread_info *ti;
10412- ti = (void *)(percpu_read_stable(kernel_stack) +
10413- KERNEL_STACK_OFFSET - THREAD_SIZE);
10414- return ti;
10415-}
10416-
10417-#else /* !__ASSEMBLY__ */
10418-
10419-/* how to get the thread information struct from ASM */
10420-#define GET_THREAD_INFO(reg) \
10421- movq PER_CPU_VAR(kernel_stack),reg ; \
10422- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10423-
10424+/* how to get the current stack pointer from C */
10425+register unsigned long current_stack_pointer asm("rsp") __used;
10426 #endif
10427
10428 #endif /* !X86_32 */
10429@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10430 extern void free_thread_info(struct thread_info *ti);
10431 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10432 #define arch_task_cache_init arch_task_cache_init
10433+
10434+#define __HAVE_THREAD_FUNCTIONS
10435+#define task_thread_info(task) (&(task)->tinfo)
10436+#define task_stack_page(task) ((task)->stack)
10437+#define setup_thread_stack(p, org) do {} while (0)
10438+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10439+
10440+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10441+extern struct task_struct *alloc_task_struct(void);
10442+extern void free_task_struct(struct task_struct *);
10443+
10444 #endif
10445 #endif /* _ASM_X86_THREAD_INFO_H */
10446diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h
10447--- linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10448+++ linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10449@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10450 static __always_inline unsigned long __must_check
10451 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10452 {
10453+ pax_track_stack();
10454+
10455+ if ((long)n < 0)
10456+ return n;
10457+
10458 if (__builtin_constant_p(n)) {
10459 unsigned long ret;
10460
10461@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10462 return ret;
10463 }
10464 }
10465+ if (!__builtin_constant_p(n))
10466+ check_object_size(from, n, true);
10467 return __copy_to_user_ll(to, from, n);
10468 }
10469
10470@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10471 __copy_to_user(void __user *to, const void *from, unsigned long n)
10472 {
10473 might_fault();
10474+
10475 return __copy_to_user_inatomic(to, from, n);
10476 }
10477
10478 static __always_inline unsigned long
10479 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10480 {
10481+ if ((long)n < 0)
10482+ return n;
10483+
10484 /* Avoid zeroing the tail if the copy fails..
10485 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10486 * but as the zeroing behaviour is only significant when n is not
10487@@ -138,6 +149,12 @@ static __always_inline unsigned long
10488 __copy_from_user(void *to, const void __user *from, unsigned long n)
10489 {
10490 might_fault();
10491+
10492+ pax_track_stack();
10493+
10494+ if ((long)n < 0)
10495+ return n;
10496+
10497 if (__builtin_constant_p(n)) {
10498 unsigned long ret;
10499
10500@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10501 return ret;
10502 }
10503 }
10504+ if (!__builtin_constant_p(n))
10505+ check_object_size(to, n, false);
10506 return __copy_from_user_ll(to, from, n);
10507 }
10508
10509@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10510 const void __user *from, unsigned long n)
10511 {
10512 might_fault();
10513+
10514+ if ((long)n < 0)
10515+ return n;
10516+
10517 if (__builtin_constant_p(n)) {
10518 unsigned long ret;
10519
10520@@ -182,14 +205,62 @@ static __always_inline unsigned long
10521 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10522 unsigned long n)
10523 {
10524- return __copy_from_user_ll_nocache_nozero(to, from, n);
10525+ if ((long)n < 0)
10526+ return n;
10527+
10528+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10529+}
10530+
10531+/**
10532+ * copy_to_user: - Copy a block of data into user space.
10533+ * @to: Destination address, in user space.
10534+ * @from: Source address, in kernel space.
10535+ * @n: Number of bytes to copy.
10536+ *
10537+ * Context: User context only. This function may sleep.
10538+ *
10539+ * Copy data from kernel space to user space.
10540+ *
10541+ * Returns number of bytes that could not be copied.
10542+ * On success, this will be zero.
10543+ */
10544+static __always_inline unsigned long __must_check
10545+copy_to_user(void __user *to, const void *from, unsigned long n)
10546+{
10547+ if (access_ok(VERIFY_WRITE, to, n))
10548+ n = __copy_to_user(to, from, n);
10549+ return n;
10550+}
10551+
10552+/**
10553+ * copy_from_user: - Copy a block of data from user space.
10554+ * @to: Destination address, in kernel space.
10555+ * @from: Source address, in user space.
10556+ * @n: Number of bytes to copy.
10557+ *
10558+ * Context: User context only. This function may sleep.
10559+ *
10560+ * Copy data from user space to kernel space.
10561+ *
10562+ * Returns number of bytes that could not be copied.
10563+ * On success, this will be zero.
10564+ *
10565+ * If some data could not be copied, this function will pad the copied
10566+ * data to the requested size using zero bytes.
10567+ */
10568+static __always_inline unsigned long __must_check
10569+copy_from_user(void *to, const void __user *from, unsigned long n)
10570+{
10571+ if (access_ok(VERIFY_READ, from, n))
10572+ n = __copy_from_user(to, from, n);
10573+ else if ((long)n > 0) {
10574+ if (!__builtin_constant_p(n))
10575+ check_object_size(to, n, false);
10576+ memset(to, 0, n);
10577+ }
10578+ return n;
10579 }
10580
10581-unsigned long __must_check copy_to_user(void __user *to,
10582- const void *from, unsigned long n);
10583-unsigned long __must_check copy_from_user(void *to,
10584- const void __user *from,
10585- unsigned long n);
10586 long __must_check strncpy_from_user(char *dst, const char __user *src,
10587 long count);
10588 long __must_check __strncpy_from_user(char *dst,
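The inlined copy_to_user()/copy_from_user() above keep the usual contract: the return value is the number of bytes that could not be copied (zero on success), and a failed copy_from_user() zero-fills the destination. A typical caller looks like this sketch (hypothetical driver code, shown only as a usage example):

	/* sketch: write() handler copying a bounded amount from userspace */
	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
	{
		char kbuf[64];

		if (count > sizeof(kbuf))
			count = sizeof(kbuf);
		if (copy_from_user(kbuf, buf, count))
			return -EFAULT;
		/* ... consume kbuf ... */
		return count;
	}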
10589diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h
10590--- linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10591+++ linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10592@@ -9,6 +9,9 @@
10593 #include <linux/prefetch.h>
10594 #include <linux/lockdep.h>
10595 #include <asm/page.h>
10596+#include <asm/pgtable.h>
10597+
10598+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10599
10600 /*
10601 * Copy To/From Userspace
10602@@ -19,113 +22,203 @@ __must_check unsigned long
10603 copy_user_generic(void *to, const void *from, unsigned len);
10604
10605 __must_check unsigned long
10606-copy_to_user(void __user *to, const void *from, unsigned len);
10607-__must_check unsigned long
10608-copy_from_user(void *to, const void __user *from, unsigned len);
10609-__must_check unsigned long
10610 copy_in_user(void __user *to, const void __user *from, unsigned len);
10611
10612 static __always_inline __must_check
10613-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10614+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10615 {
10616- int ret = 0;
10617+ unsigned ret = 0;
10618
10619 might_fault();
10620- if (!__builtin_constant_p(size))
10621- return copy_user_generic(dst, (__force void *)src, size);
10622+
10623+ if ((int)size < 0)
10624+ return size;
10625+
10626+#ifdef CONFIG_PAX_MEMORY_UDEREF
10627+ if (!__access_ok(VERIFY_READ, src, size))
10628+ return size;
10629+#endif
10630+
10631+ if (!__builtin_constant_p(size)) {
10632+ check_object_size(dst, size, false);
10633+
10634+#ifdef CONFIG_PAX_MEMORY_UDEREF
10635+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10636+ src += PAX_USER_SHADOW_BASE;
10637+#endif
10638+
10639+ return copy_user_generic(dst, (__force const void *)src, size);
10640+ }
10641 switch (size) {
10642- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10643+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10644 ret, "b", "b", "=q", 1);
10645 return ret;
10646- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10647+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10648 ret, "w", "w", "=r", 2);
10649 return ret;
10650- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10651+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10652 ret, "l", "k", "=r", 4);
10653 return ret;
10654- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10655+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10656 ret, "q", "", "=r", 8);
10657 return ret;
10658 case 10:
10659- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10660+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10661 ret, "q", "", "=r", 10);
10662 if (unlikely(ret))
10663 return ret;
10664 __get_user_asm(*(u16 *)(8 + (char *)dst),
10665- (u16 __user *)(8 + (char __user *)src),
10666+ (const u16 __user *)(8 + (const char __user *)src),
10667 ret, "w", "w", "=r", 2);
10668 return ret;
10669 case 16:
10670- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10671+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10672 ret, "q", "", "=r", 16);
10673 if (unlikely(ret))
10674 return ret;
10675 __get_user_asm(*(u64 *)(8 + (char *)dst),
10676- (u64 __user *)(8 + (char __user *)src),
10677+ (const u64 __user *)(8 + (const char __user *)src),
10678 ret, "q", "", "=r", 8);
10679 return ret;
10680 default:
10681- return copy_user_generic(dst, (__force void *)src, size);
10682+
10683+#ifdef CONFIG_PAX_MEMORY_UDEREF
10684+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10685+ src += PAX_USER_SHADOW_BASE;
10686+#endif
10687+
10688+ return copy_user_generic(dst, (__force const void *)src, size);
10689 }
10690 }
10691
10692 static __always_inline __must_check
10693-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10694+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10695 {
10696- int ret = 0;
10697+ unsigned ret = 0;
10698
10699 might_fault();
10700- if (!__builtin_constant_p(size))
10701+
10702+ pax_track_stack();
10703+
10704+ if ((int)size < 0)
10705+ return size;
10706+
10707+#ifdef CONFIG_PAX_MEMORY_UDEREF
10708+ if (!__access_ok(VERIFY_WRITE, dst, size))
10709+ return size;
10710+#endif
10711+
10712+ if (!__builtin_constant_p(size)) {
10713+ check_object_size(src, size, true);
10714+
10715+#ifdef CONFIG_PAX_MEMORY_UDEREF
10716+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10717+ dst += PAX_USER_SHADOW_BASE;
10718+#endif
10719+
10720 return copy_user_generic((__force void *)dst, src, size);
10721+ }
10722 switch (size) {
10723- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10724+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10725 ret, "b", "b", "iq", 1);
10726 return ret;
10727- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10728+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10729 ret, "w", "w", "ir", 2);
10730 return ret;
10731- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10732+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10733 ret, "l", "k", "ir", 4);
10734 return ret;
10735- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10736+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10737 ret, "q", "", "er", 8);
10738 return ret;
10739 case 10:
10740- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10741+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10742 ret, "q", "", "er", 10);
10743 if (unlikely(ret))
10744 return ret;
10745 asm("":::"memory");
10746- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10747+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10748 ret, "w", "w", "ir", 2);
10749 return ret;
10750 case 16:
10751- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10752+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10753 ret, "q", "", "er", 16);
10754 if (unlikely(ret))
10755 return ret;
10756 asm("":::"memory");
10757- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10758+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10759 ret, "q", "", "er", 8);
10760 return ret;
10761 default:
10762+
10763+#ifdef CONFIG_PAX_MEMORY_UDEREF
10764+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10765+ dst += PAX_USER_SHADOW_BASE;
10766+#endif
10767+
10768 return copy_user_generic((__force void *)dst, src, size);
10769 }
10770 }
10771
10772 static __always_inline __must_check
10773-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10774+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10775+{
10776+ if (access_ok(VERIFY_WRITE, to, len))
10777+ len = __copy_to_user(to, from, len);
10778+ return len;
10779+}
10780+
10781+static __always_inline __must_check
10782+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10783+{
10784+ if ((int)len < 0)
10785+ return len;
10786+
10787+ if (access_ok(VERIFY_READ, from, len))
10788+ len = __copy_from_user(to, from, len);
10789+ else if ((int)len > 0) {
10790+ if (!__builtin_constant_p(len))
10791+ check_object_size(to, len, false);
10792+ memset(to, 0, len);
10793+ }
10794+ return len;
10795+}
10796+
10797+static __always_inline __must_check
10798+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10799 {
10800- int ret = 0;
10801+ unsigned ret = 0;
10802
10803 might_fault();
10804- if (!__builtin_constant_p(size))
10805+
10806+ pax_track_stack();
10807+
10808+ if ((int)size < 0)
10809+ return size;
10810+
10811+#ifdef CONFIG_PAX_MEMORY_UDEREF
10812+ if (!__access_ok(VERIFY_READ, src, size))
10813+ return size;
10814+ if (!__access_ok(VERIFY_WRITE, dst, size))
10815+ return size;
10816+#endif
10817+
10818+ if (!__builtin_constant_p(size)) {
10819+
10820+#ifdef CONFIG_PAX_MEMORY_UDEREF
10821+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10822+ src += PAX_USER_SHADOW_BASE;
10823+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10824+ dst += PAX_USER_SHADOW_BASE;
10825+#endif
10826+
10827 return copy_user_generic((__force void *)dst,
10828- (__force void *)src, size);
10829+ (__force const void *)src, size);
10830+ }
10831 switch (size) {
10832 case 1: {
10833 u8 tmp;
10834- __get_user_asm(tmp, (u8 __user *)src,
10835+ __get_user_asm(tmp, (const u8 __user *)src,
10836 ret, "b", "b", "=q", 1);
10837 if (likely(!ret))
10838 __put_user_asm(tmp, (u8 __user *)dst,
10839@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10840 }
10841 case 2: {
10842 u16 tmp;
10843- __get_user_asm(tmp, (u16 __user *)src,
10844+ __get_user_asm(tmp, (const u16 __user *)src,
10845 ret, "w", "w", "=r", 2);
10846 if (likely(!ret))
10847 __put_user_asm(tmp, (u16 __user *)dst,
10848@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10849
10850 case 4: {
10851 u32 tmp;
10852- __get_user_asm(tmp, (u32 __user *)src,
10853+ __get_user_asm(tmp, (const u32 __user *)src,
10854 ret, "l", "k", "=r", 4);
10855 if (likely(!ret))
10856 __put_user_asm(tmp, (u32 __user *)dst,
10857@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10858 }
10859 case 8: {
10860 u64 tmp;
10861- __get_user_asm(tmp, (u64 __user *)src,
10862+ __get_user_asm(tmp, (const u64 __user *)src,
10863 ret, "q", "", "=r", 8);
10864 if (likely(!ret))
10865 __put_user_asm(tmp, (u64 __user *)dst,
10866@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10867 return ret;
10868 }
10869 default:
10870+
10871+#ifdef CONFIG_PAX_MEMORY_UDEREF
10872+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10873+ src += PAX_USER_SHADOW_BASE;
10874+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10875+ dst += PAX_USER_SHADOW_BASE;
10876+#endif
10877+
10878 return copy_user_generic((__force void *)dst,
10879- (__force void *)src, size);
10880+ (__force const void *)src, size);
10881 }
10882 }
10883
10884@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10885 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10886 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10887
10888-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10889- unsigned size);
10890+static __must_check __always_inline unsigned long
10891+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10892+{
10893+ pax_track_stack();
10894+
10895+ if ((int)size < 0)
10896+ return size;
10897
10898-static __must_check __always_inline int
10899+#ifdef CONFIG_PAX_MEMORY_UDEREF
10900+ if (!__access_ok(VERIFY_READ, src, size))
10901+ return size;
10902+
10903+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10904+ src += PAX_USER_SHADOW_BASE;
10905+#endif
10906+
10907+ return copy_user_generic(dst, (__force const void *)src, size);
10908+}
10909+
10910+static __must_check __always_inline unsigned long
10911 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10912 {
10913+ if ((int)size < 0)
10914+ return size;
10915+
10916+#ifdef CONFIG_PAX_MEMORY_UDEREF
10917+ if (!__access_ok(VERIFY_WRITE, dst, size))
10918+ return size;
10919+
10920+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10921+ dst += PAX_USER_SHADOW_BASE;
10922+#endif
10923+
10924 return copy_user_generic((__force void *)dst, src, size);
10925 }
10926
10927-extern long __copy_user_nocache(void *dst, const void __user *src,
10928+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10929 unsigned size, int zerorest);
10930
10931-static inline int
10932-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10933+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10934 {
10935 might_sleep();
10936+
10937+ if ((int)size < 0)
10938+ return size;
10939+
10940+#ifdef CONFIG_PAX_MEMORY_UDEREF
10941+ if (!__access_ok(VERIFY_READ, src, size))
10942+ return size;
10943+#endif
10944+
10945 return __copy_user_nocache(dst, src, size, 1);
10946 }
10947
10948-static inline int
10949-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10950+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10951 unsigned size)
10952 {
10953+ if ((int)size < 0)
10954+ return size;
10955+
10956+#ifdef CONFIG_PAX_MEMORY_UDEREF
10957+ if (!__access_ok(VERIFY_READ, src, size))
10958+ return size;
10959+#endif
10960+
10961 return __copy_user_nocache(dst, src, size, 0);
10962 }
10963
10964-unsigned long
10965+extern unsigned long
10966 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10967
10968 #endif /* _ASM_X86_UACCESS_64_H */
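Under PAX_MEMORY_UDEREF on x86_64, the hunk above offsets every user pointer by PAX_USER_SHADOW_BASE before the actual copy, so that in kernel context user memory is reached only through the shifted shadow alias used by the explicit accessors, and a stray dereference of a raw user pointer faults. The repeated open-coded adjustment is equivalent to the following sketch (the helper is hypothetical, introduced here only to name the idiom):

	/* sketch: translate a raw user pointer into its UDEREF shadow alias */
	static inline const void __user *uderef_shadow(const void __user *uptr)
	{
		unsigned long addr = (unsigned long)uptr;

	#ifdef CONFIG_PAX_MEMORY_UDEREF
		if (addr < PAX_USER_SHADOW_BASE)
			addr += PAX_USER_SHADOW_BASE;
	#endif
		return (const void __user *)addr;
	}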
10969diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess.h linux-2.6.32.42/arch/x86/include/asm/uaccess.h
10970--- linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10971+++ linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10972@@ -8,12 +8,15 @@
10973 #include <linux/thread_info.h>
10974 #include <linux/prefetch.h>
10975 #include <linux/string.h>
10976+#include <linux/sched.h>
10977 #include <asm/asm.h>
10978 #include <asm/page.h>
10979
10980 #define VERIFY_READ 0
10981 #define VERIFY_WRITE 1
10982
10983+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10984+
10985 /*
10986 * The fs value determines whether argument validity checking should be
10987 * performed or not. If get_fs() == USER_DS, checking is performed, with
10988@@ -29,7 +32,12 @@
10989
10990 #define get_ds() (KERNEL_DS)
10991 #define get_fs() (current_thread_info()->addr_limit)
10992+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10993+void __set_fs(mm_segment_t x);
10994+void set_fs(mm_segment_t x);
10995+#else
10996 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10997+#endif
10998
10999 #define segment_eq(a, b) ((a).seg == (b).seg)
11000
11001@@ -77,7 +85,33 @@
11002 * checks that the pointer is in the user space range - after calling
11003 * this function, memory access functions may still return -EFAULT.
11004 */
11005-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11006+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11007+#define access_ok(type, addr, size) \
11008+({ \
11009+ long __size = size; \
11010+ unsigned long __addr = (unsigned long)addr; \
11011+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11012+ unsigned long __end_ao = __addr + __size - 1; \
11013+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11014+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11015+ while(__addr_ao <= __end_ao) { \
11016+ char __c_ao; \
11017+ __addr_ao += PAGE_SIZE; \
11018+ if (__size > PAGE_SIZE) \
11019+ cond_resched(); \
11020+ if (__get_user(__c_ao, (char __user *)__addr)) \
11021+ break; \
11022+ if (type != VERIFY_WRITE) { \
11023+ __addr = __addr_ao; \
11024+ continue; \
11025+ } \
11026+ if (__put_user(__c_ao, (char __user *)__addr)) \
11027+ break; \
11028+ __addr = __addr_ao; \
11029+ } \
11030+ } \
11031+ __ret_ao; \
11032+})
11033
11034 /*
11035 * The exception table consists of pairs of addresses: the first is the
11036@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11037 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11038 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11039
11040-
11041+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11042+#define __copyuser_seg "gs;"
11043+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11044+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11045+#else
11046+#define __copyuser_seg
11047+#define __COPYUSER_SET_ES
11048+#define __COPYUSER_RESTORE_ES
11049+#endif
11050
11051 #ifdef CONFIG_X86_32
11052 #define __put_user_asm_u64(x, addr, err, errret) \
11053- asm volatile("1: movl %%eax,0(%2)\n" \
11054- "2: movl %%edx,4(%2)\n" \
11055+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11056+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11057 "3:\n" \
11058 ".section .fixup,\"ax\"\n" \
11059 "4: movl %3,%0\n" \
11060@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11061 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11062
11063 #define __put_user_asm_ex_u64(x, addr) \
11064- asm volatile("1: movl %%eax,0(%1)\n" \
11065- "2: movl %%edx,4(%1)\n" \
11066+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11067+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11068 "3:\n" \
11069 _ASM_EXTABLE(1b, 2b - 1b) \
11070 _ASM_EXTABLE(2b, 3b - 2b) \
11071@@ -374,7 +416,7 @@ do { \
11072 } while (0)
11073
11074 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11075- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11076+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11077 "2:\n" \
11078 ".section .fixup,\"ax\"\n" \
11079 "3: mov %3,%0\n" \
11080@@ -382,7 +424,7 @@ do { \
11081 " jmp 2b\n" \
11082 ".previous\n" \
11083 _ASM_EXTABLE(1b, 3b) \
11084- : "=r" (err), ltype(x) \
11085+ : "=r" (err), ltype (x) \
11086 : "m" (__m(addr)), "i" (errret), "0" (err))
11087
11088 #define __get_user_size_ex(x, ptr, size) \
11089@@ -407,7 +449,7 @@ do { \
11090 } while (0)
11091
11092 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11093- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11094+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11095 "2:\n" \
11096 _ASM_EXTABLE(1b, 2b - 1b) \
11097 : ltype(x) : "m" (__m(addr)))
11098@@ -424,13 +466,24 @@ do { \
11099 int __gu_err; \
11100 unsigned long __gu_val; \
11101 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11102- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11103+ (x) = (__typeof__(*(ptr)))__gu_val; \
11104 __gu_err; \
11105 })
11106
11107 /* FIXME: this hack is definitely wrong -AK */
11108 struct __large_struct { unsigned long buf[100]; };
11109-#define __m(x) (*(struct __large_struct __user *)(x))
11110+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11111+#define ____m(x) \
11112+({ \
11113+ unsigned long ____x = (unsigned long)(x); \
11114+ if (____x < PAX_USER_SHADOW_BASE) \
11115+ ____x += PAX_USER_SHADOW_BASE; \
11116+ (void __user *)____x; \
11117+})
11118+#else
11119+#define ____m(x) (x)
11120+#endif
11121+#define __m(x) (*(struct __large_struct __user *)____m(x))
11122
11123 /*
11124 * Tell gcc we read from memory instead of writing: this is because
11125@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11126 * aliasing issues.
11127 */
11128 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11129- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11130+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11131 "2:\n" \
11132 ".section .fixup,\"ax\"\n" \
11133 "3: mov %3,%0\n" \
11134@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11135 ".previous\n" \
11136 _ASM_EXTABLE(1b, 3b) \
11137 : "=r"(err) \
11138- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11139+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11140
11141 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11142- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11143+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11144 "2:\n" \
11145 _ASM_EXTABLE(1b, 2b - 1b) \
11146 : : ltype(x), "m" (__m(addr)))
11147@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11148 * On error, the variable @x is set to zero.
11149 */
11150
11151+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11152+#define __get_user(x, ptr) get_user((x), (ptr))
11153+#else
11154 #define __get_user(x, ptr) \
11155 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11156+#endif
11157
11158 /**
11159 * __put_user: - Write a simple value into user space, with less checking.
11160@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11161 * Returns zero on success, or -EFAULT on error.
11162 */
11163
11164+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11165+#define __put_user(x, ptr) put_user((x), (ptr))
11166+#else
11167 #define __put_user(x, ptr) \
11168 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11169+#endif
11170
11171 #define __get_user_unaligned __get_user
11172 #define __put_user_unaligned __put_user
11173@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11174 #define get_user_ex(x, ptr) do { \
11175 unsigned long __gue_val; \
11176 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11177- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11178+ (x) = (__typeof__(*(ptr)))__gue_val; \
11179 } while (0)
11180
11181 #ifdef CONFIG_X86_WP_WORKS_OK
11182@@ -567,6 +628,7 @@ extern struct movsl_mask {
11183
11184 #define ARCH_HAS_NOCACHE_UACCESS 1
11185
11186+#define ARCH_HAS_SORT_EXTABLE
11187 #ifdef CONFIG_X86_32
11188 # include "uaccess_32.h"
11189 #else
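
The rewritten access_ok() above is no longer a pure range check: when a buffer crosses a page boundary it walks the range page by page, reading one byte per page with __get_user() (and, for VERIFY_WRITE, writing that byte back with __put_user()) so every page of the range gets touched up front. A simplified userspace model of that walk, assuming touch_read()/touch_write() as stand-ins for __get_user/__put_user and a plain wrap check in place of __range_not_ok() (all stand-ins are assumptions for illustration):

#include <stdbool.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* stand-in for __get_user(): in this model every passed address is readable */
static int touch_read(unsigned long addr, char *out)
{
        *out = *(volatile char *)addr;
        return 0;
}

/* stand-in for __put_user(): in this model every passed address is writable */
static int touch_write(unsigned long addr, char c)
{
        *(volatile char *)addr = c;
        return 0;
}

/* simplified model of the page-walking access_ok() from the hunk above */
static bool access_ok_model(int write, unsigned long addr, unsigned long size)
{
        unsigned long page = addr & PAGE_MASK;
        unsigned long end = addr + size - 1;

        if (end < addr)                         /* wrapped around: reject, like __range_not_ok() */
                return false;

        if (((end ^ page) & PAGE_MASK) == 0)    /* fits in a single page: nothing to walk */
                return true;

        while (page <= end) {                   /* touch one byte per page */
                char c;

                page += PAGE_SIZE;
                if (touch_read(addr, &c))
                        break;
                if (write && touch_write(addr, c))
                        break;
                addr = page;                    /* next byte to touch: start of the next page */
        }
        return true;                            /* as in the macro, the walk does not change the result */
}
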
11190diff -urNp linux-2.6.32.42/arch/x86/include/asm/vgtod.h linux-2.6.32.42/arch/x86/include/asm/vgtod.h
11191--- linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11192+++ linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11193@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11194 int sysctl_enabled;
11195 struct timezone sys_tz;
11196 struct { /* extract of a clocksource struct */
11197+ char name[8];
11198 cycle_t (*vread)(void);
11199 cycle_t cycle_last;
11200 cycle_t mask;
11201diff -urNp linux-2.6.32.42/arch/x86/include/asm/vmi.h linux-2.6.32.42/arch/x86/include/asm/vmi.h
11202--- linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11203+++ linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11204@@ -191,6 +191,7 @@ struct vrom_header {
11205 u8 reserved[96]; /* Reserved for headers */
11206 char vmi_init[8]; /* VMI_Init jump point */
11207 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11208+ char rom_data[8048]; /* rest of the option ROM */
11209 } __attribute__((packed));
11210
11211 struct pnp_header {
11212diff -urNp linux-2.6.32.42/arch/x86/include/asm/vsyscall.h linux-2.6.32.42/arch/x86/include/asm/vsyscall.h
11213--- linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11214+++ linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11215@@ -15,9 +15,10 @@ enum vsyscall_num {
11216
11217 #ifdef __KERNEL__
11218 #include <linux/seqlock.h>
11219+#include <linux/getcpu.h>
11220+#include <linux/time.h>
11221
11222 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11223-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11224
11225 /* Definitions for CONFIG_GENERIC_TIME definitions */
11226 #define __section_vsyscall_gtod_data __attribute__ \
11227@@ -31,7 +32,6 @@ enum vsyscall_num {
11228 #define VGETCPU_LSL 2
11229
11230 extern int __vgetcpu_mode;
11231-extern volatile unsigned long __jiffies;
11232
11233 /* kernel space (writeable) */
11234 extern int vgetcpu_mode;
11235@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11236
11237 extern void map_vsyscall(void);
11238
11239+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11240+extern time_t vtime(time_t *t);
11241+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11242 #endif /* __KERNEL__ */
11243
11244 #endif /* _ASM_X86_VSYSCALL_H */
11245diff -urNp linux-2.6.32.42/arch/x86/include/asm/xsave.h linux-2.6.32.42/arch/x86/include/asm/xsave.h
11246--- linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11247+++ linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11248@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11249 static inline int xsave_user(struct xsave_struct __user *buf)
11250 {
11251 int err;
11252+
11253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11254+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11255+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11256+#endif
11257+
11258 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11259 "2:\n"
11260 ".section .fixup,\"ax\"\n"
11261@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11262 u32 lmask = mask;
11263 u32 hmask = mask >> 32;
11264
11265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11266+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11267+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11268+#endif
11269+
11270 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11271 "2:\n"
11272 ".section .fixup,\"ax\"\n"
11273diff -urNp linux-2.6.32.42/arch/x86/Kconfig linux-2.6.32.42/arch/x86/Kconfig
11274--- linux-2.6.32.42/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11275+++ linux-2.6.32.42/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11276@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11277
11278 config X86_32_LAZY_GS
11279 def_bool y
11280- depends on X86_32 && !CC_STACKPROTECTOR
11281+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11282
11283 config KTIME_SCALAR
11284 def_bool X86_32
11285@@ -1008,7 +1008,7 @@ choice
11286
11287 config NOHIGHMEM
11288 bool "off"
11289- depends on !X86_NUMAQ
11290+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11291 ---help---
11292 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11293 However, the address space of 32-bit x86 processors is only 4
11294@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11295
11296 config HIGHMEM4G
11297 bool "4GB"
11298- depends on !X86_NUMAQ
11299+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11300 ---help---
11301 Select this if you have a 32-bit processor and between 1 and 4
11302 gigabytes of physical RAM.
11303@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11304 hex
11305 default 0xB0000000 if VMSPLIT_3G_OPT
11306 default 0x80000000 if VMSPLIT_2G
11307- default 0x78000000 if VMSPLIT_2G_OPT
11308+ default 0x70000000 if VMSPLIT_2G_OPT
11309 default 0x40000000 if VMSPLIT_1G
11310 default 0xC0000000
11311 depends on X86_32
11312@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11313
11314 config EFI
11315 bool "EFI runtime service support"
11316- depends on ACPI
11317+ depends on ACPI && !PAX_KERNEXEC
11318 ---help---
11319 This enables the kernel to use EFI runtime services that are
11320 available (such as the EFI variable services).
11321@@ -1460,6 +1460,7 @@ config SECCOMP
11322
11323 config CC_STACKPROTECTOR
11324 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11325+ depends on X86_64 || !PAX_MEMORY_UDEREF
11326 ---help---
11327 This option turns on the -fstack-protector GCC feature. This
11328 feature puts, at the beginning of functions, a canary value on
11329@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11330 config PHYSICAL_START
11331 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11332 default "0x1000000"
11333+ range 0x400000 0x40000000
11334 ---help---
11335 This gives the physical address where the kernel is loaded.
11336
11337@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11338 hex
11339 prompt "Alignment value to which kernel should be aligned" if X86_32
11340 default "0x1000000"
11341+ range 0x400000 0x1000000 if PAX_KERNEXEC
11342 range 0x2000 0x1000000
11343 ---help---
11344 This value puts the alignment restrictions on physical address
11345@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11346 Say N if you want to disable CPU hotplug.
11347
11348 config COMPAT_VDSO
11349- def_bool y
11350+ def_bool n
11351 prompt "Compat VDSO support"
11352 depends on X86_32 || IA32_EMULATION
11353+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11354 ---help---
11355 Map the 32-bit VDSO to the predictable old-style address too.
11356
11357diff -urNp linux-2.6.32.42/arch/x86/Kconfig.cpu linux-2.6.32.42/arch/x86/Kconfig.cpu
11358--- linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11359+++ linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11360@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11361
11362 config X86_F00F_BUG
11363 def_bool y
11364- depends on M586MMX || M586TSC || M586 || M486 || M386
11365+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11366
11367 config X86_WP_WORKS_OK
11368 def_bool y
11369@@ -360,7 +360,7 @@ config X86_POPAD_OK
11370
11371 config X86_ALIGNMENT_16
11372 def_bool y
11373- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11374+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11375
11376 config X86_INTEL_USERCOPY
11377 def_bool y
11378@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11379 # generates cmov.
11380 config X86_CMOV
11381 def_bool y
11382- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11383+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11384
11385 config X86_MINIMUM_CPU_FAMILY
11386 int
11387diff -urNp linux-2.6.32.42/arch/x86/Kconfig.debug linux-2.6.32.42/arch/x86/Kconfig.debug
11388--- linux-2.6.32.42/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11389+++ linux-2.6.32.42/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11390@@ -99,7 +99,7 @@ config X86_PTDUMP
11391 config DEBUG_RODATA
11392 bool "Write protect kernel read-only data structures"
11393 default y
11394- depends on DEBUG_KERNEL
11395+ depends on DEBUG_KERNEL && BROKEN
11396 ---help---
11397 Mark the kernel read-only data as write-protected in the pagetables,
11398 in order to catch accidental (and incorrect) writes to such const
11399diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S
11400--- linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11401+++ linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11402@@ -91,6 +91,9 @@ _start:
11403 /* Do any other stuff... */
11404
11405 #ifndef CONFIG_64BIT
11406+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11407+ call verify_cpu
11408+
11409 /* This could also be done in C code... */
11410 movl pmode_cr3, %eax
11411 movl %eax, %cr3
11412@@ -104,7 +107,7 @@ _start:
11413 movl %eax, %ecx
11414 orl %edx, %ecx
11415 jz 1f
11416- movl $0xc0000080, %ecx
11417+ mov $MSR_EFER, %ecx
11418 wrmsr
11419 1:
11420
11421@@ -114,6 +117,7 @@ _start:
11422 movl pmode_cr0, %eax
11423 movl %eax, %cr0
11424 jmp pmode_return
11425+# include "../../verify_cpu.S"
11426 #else
11427 pushw $0
11428 pushw trampoline_segment
11429diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c
11430--- linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11431+++ linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11432@@ -11,11 +11,12 @@
11433 #include <linux/cpumask.h>
11434 #include <asm/segment.h>
11435 #include <asm/desc.h>
11436+#include <asm/e820.h>
11437
11438 #include "realmode/wakeup.h"
11439 #include "sleep.h"
11440
11441-unsigned long acpi_wakeup_address;
11442+unsigned long acpi_wakeup_address = 0x2000;
11443 unsigned long acpi_realmode_flags;
11444
11445 /* address in low memory of the wakeup routine. */
11446@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11447 #else /* CONFIG_64BIT */
11448 header->trampoline_segment = setup_trampoline() >> 4;
11449 #ifdef CONFIG_SMP
11450- stack_start.sp = temp_stack + sizeof(temp_stack);
11451+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11452+
11453+ pax_open_kernel();
11454 early_gdt_descr.address =
11455 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11456+ pax_close_kernel();
11457+
11458 initial_gs = per_cpu_offset(smp_processor_id());
11459 #endif
11460 initial_code = (unsigned long)wakeup_long64;
11461@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11462 return;
11463 }
11464
11465- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11466-
11467- if (!acpi_realmode) {
11468- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11469- return;
11470- }
11471-
11472- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11473+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11474+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11475 }
11476
11477
11478diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S
11479--- linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11480+++ linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11481@@ -30,13 +30,11 @@ wakeup_pmode_return:
11482 # and restore the stack ... but you need gdt for this to work
11483 movl saved_context_esp, %esp
11484
11485- movl %cs:saved_magic, %eax
11486- cmpl $0x12345678, %eax
11487+ cmpl $0x12345678, saved_magic
11488 jne bogus_magic
11489
11490 # jump to place where we left off
11491- movl saved_eip, %eax
11492- jmp *%eax
11493+ jmp *(saved_eip)
11494
11495 bogus_magic:
11496 jmp bogus_magic
11497diff -urNp linux-2.6.32.42/arch/x86/kernel/alternative.c linux-2.6.32.42/arch/x86/kernel/alternative.c
11498--- linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11499+++ linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11500@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11501
11502 BUG_ON(p->len > MAX_PATCH_LEN);
11503 /* prep the buffer with the original instructions */
11504- memcpy(insnbuf, p->instr, p->len);
11505+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11506 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11507 (unsigned long)p->instr, p->len);
11508
11509@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11510 if (smp_alt_once)
11511 free_init_pages("SMP alternatives",
11512 (unsigned long)__smp_locks,
11513- (unsigned long)__smp_locks_end);
11514+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11515
11516 restart_nmi();
11517 }
11518@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11519 * instructions. And on the local CPU you need to be protected again NMI or MCE
11520 * handlers seeing an inconsistent instruction while you patch.
11521 */
11522-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11523+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11524 size_t len)
11525 {
11526 unsigned long flags;
11527 local_irq_save(flags);
11528- memcpy(addr, opcode, len);
11529+
11530+ pax_open_kernel();
11531+ memcpy(ktla_ktva(addr), opcode, len);
11532 sync_core();
11533+ pax_close_kernel();
11534+
11535 local_irq_restore(flags);
11536 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11537 that causes hangs on some VIA CPUs. */
11538@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11539 */
11540 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11541 {
11542- unsigned long flags;
11543- char *vaddr;
11544+ unsigned char *vaddr = ktla_ktva(addr);
11545 struct page *pages[2];
11546- int i;
11547+ size_t i;
11548
11549 if (!core_kernel_text((unsigned long)addr)) {
11550- pages[0] = vmalloc_to_page(addr);
11551- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11552+ pages[0] = vmalloc_to_page(vaddr);
11553+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11554 } else {
11555- pages[0] = virt_to_page(addr);
11556+ pages[0] = virt_to_page(vaddr);
11557 WARN_ON(!PageReserved(pages[0]));
11558- pages[1] = virt_to_page(addr + PAGE_SIZE);
11559+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11560 }
11561 BUG_ON(!pages[0]);
11562- local_irq_save(flags);
11563- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11564- if (pages[1])
11565- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11566- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11567- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11568- clear_fixmap(FIX_TEXT_POKE0);
11569- if (pages[1])
11570- clear_fixmap(FIX_TEXT_POKE1);
11571- local_flush_tlb();
11572- sync_core();
11573- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11574- that causes hangs on some VIA CPUs. */
11575+ text_poke_early(addr, opcode, len);
11576 for (i = 0; i < len; i++)
11577- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11578- local_irq_restore(flags);
11579+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11580 return addr;
11581 }
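
The reworked text_poke()/text_poke_early() above boil down to one recurring KERNEXEC idiom: translate the target through ktla_ktva() to the writable alias of the kernel text, then bracket the actual memcpy() with pax_open_kernel()/pax_close_kernel(). A minimal sketch of that bracketed write, with open_kernel()/close_kernel()/text_alias() as assumed stand-ins for the PaX helpers; the interrupt masking and sync_core() that the real code also performs are omitted here:

#include <stddef.h>
#include <string.h>

/* stand-ins for pax_open_kernel()/pax_close_kernel(): in the real patch these
 * briefly make the (normally read-only) kernel text mapping writable */
static void open_kernel(void)  { }
static void close_kernel(void) { }

/* stand-in for ktla_ktva(): return the writable alias of a kernel text address */
static void *text_alias(void *addr)
{
        return addr;
}

/* bracketed write, mirroring the patched text_poke_early() above */
static void *poke_text(void *addr, const void *opcode, size_t len)
{
        open_kernel();
        memcpy(text_alias(addr), opcode, len);  /* the only window where text is writable */
        close_kernel();
        return addr;
}
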
11582diff -urNp linux-2.6.32.42/arch/x86/kernel/amd_iommu.c linux-2.6.32.42/arch/x86/kernel/amd_iommu.c
11583--- linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11584+++ linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11585@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11586 }
11587 }
11588
11589-static struct dma_map_ops amd_iommu_dma_ops = {
11590+static const struct dma_map_ops amd_iommu_dma_ops = {
11591 .alloc_coherent = alloc_coherent,
11592 .free_coherent = free_coherent,
11593 .map_page = map_page,
11594diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/apic.c linux-2.6.32.42/arch/x86/kernel/apic/apic.c
11595--- linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11596+++ linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11597@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11598 apic_write(APIC_ESR, 0);
11599 v1 = apic_read(APIC_ESR);
11600 ack_APIC_irq();
11601- atomic_inc(&irq_err_count);
11602+ atomic_inc_unchecked(&irq_err_count);
11603
11604 /*
11605 * Here is what the APIC error bits mean:
11606@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11607 u16 *bios_cpu_apicid;
11608 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11609
11610+ pax_track_stack();
11611+
11612 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11613 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11614
11615diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c
11616--- linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11617+++ linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11618@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11619 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11620 GFP_ATOMIC);
11621 if (!ioapic_entries)
11622- return 0;
11623+ return NULL;
11624
11625 for (apic = 0; apic < nr_ioapics; apic++) {
11626 ioapic_entries[apic] =
11627@@ -733,7 +733,7 @@ nomem:
11628 kfree(ioapic_entries[apic]);
11629 kfree(ioapic_entries);
11630
11631- return 0;
11632+ return NULL;
11633 }
11634
11635 /*
11636@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11637 }
11638 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11639
11640-void lock_vector_lock(void)
11641+void lock_vector_lock(void) __acquires(vector_lock)
11642 {
11643 /* Used to the online set of cpus does not change
11644 * during assign_irq_vector.
11645@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11646 spin_lock(&vector_lock);
11647 }
11648
11649-void unlock_vector_lock(void)
11650+void unlock_vector_lock(void) __releases(vector_lock)
11651 {
11652 spin_unlock(&vector_lock);
11653 }
11654@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11655 ack_APIC_irq();
11656 }
11657
11658-atomic_t irq_mis_count;
11659+atomic_unchecked_t irq_mis_count;
11660
11661 static void ack_apic_level(unsigned int irq)
11662 {
11663@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11664
11665 /* Tail end of version 0x11 I/O APIC bug workaround */
11666 if (!(v & (1 << (i & 0x1f)))) {
11667- atomic_inc(&irq_mis_count);
11668+ atomic_inc_unchecked(&irq_mis_count);
11669 spin_lock(&ioapic_lock);
11670 __mask_and_edge_IO_APIC_irq(cfg);
11671 __unmask_and_level_IO_APIC_irq(cfg);
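
The irq_err_count and irq_mis_count conversions above show the patch's refcount-hardening split: counters that are purely statistical move to atomic_unchecked_t and the *_unchecked operations, which opt out of the overflow detection applied to ordinary atomic_t reference counts. A rough illustration of that split using a hypothetical counter type; the typedef and helpers below are illustrative assumptions, not the patch's real (arch-specific) definitions:

/* illustrative stand-in for atomic_unchecked_t: a plain statistics counter
 * that is deliberately exempt from reference-count overflow checking */
typedef struct {
        long counter;
} stat_counter_t;

static inline void stat_inc(stat_counter_t *c)
{
        __atomic_fetch_add(&c->counter, 1, __ATOMIC_RELAXED);  /* no overflow trap */
}

static inline long stat_read(const stat_counter_t *c)
{
        return __atomic_load_n(&c->counter, __ATOMIC_RELAXED);
}

/* usage in the spirit of the hunk above: counting misrouted IRQs is pure
 * bookkeeping, so wrapping around is harmless and need not be trapped */
static stat_counter_t irq_mis_stat;

static void note_misrouted_irq(void)
{
        stat_inc(&irq_mis_stat);
}
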
11672diff -urNp linux-2.6.32.42/arch/x86/kernel/apm_32.c linux-2.6.32.42/arch/x86/kernel/apm_32.c
11673--- linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11674+++ linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11675@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11676 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11677 * even though they are called in protected mode.
11678 */
11679-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11680+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11681 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11682
11683 static const char driver_version[] = "1.16ac"; /* no spaces */
11684@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11685 BUG_ON(cpu != 0);
11686 gdt = get_cpu_gdt_table(cpu);
11687 save_desc_40 = gdt[0x40 / 8];
11688+
11689+ pax_open_kernel();
11690 gdt[0x40 / 8] = bad_bios_desc;
11691+ pax_close_kernel();
11692
11693 apm_irq_save(flags);
11694 APM_DO_SAVE_SEGS;
11695@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11696 &call->esi);
11697 APM_DO_RESTORE_SEGS;
11698 apm_irq_restore(flags);
11699+
11700+ pax_open_kernel();
11701 gdt[0x40 / 8] = save_desc_40;
11702+ pax_close_kernel();
11703+
11704 put_cpu();
11705
11706 return call->eax & 0xff;
11707@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11708 BUG_ON(cpu != 0);
11709 gdt = get_cpu_gdt_table(cpu);
11710 save_desc_40 = gdt[0x40 / 8];
11711+
11712+ pax_open_kernel();
11713 gdt[0x40 / 8] = bad_bios_desc;
11714+ pax_close_kernel();
11715
11716 apm_irq_save(flags);
11717 APM_DO_SAVE_SEGS;
11718@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11719 &call->eax);
11720 APM_DO_RESTORE_SEGS;
11721 apm_irq_restore(flags);
11722+
11723+ pax_open_kernel();
11724 gdt[0x40 / 8] = save_desc_40;
11725+ pax_close_kernel();
11726+
11727 put_cpu();
11728 return error;
11729 }
11730@@ -975,7 +989,7 @@ recalc:
11731
11732 static void apm_power_off(void)
11733 {
11734- unsigned char po_bios_call[] = {
11735+ const unsigned char po_bios_call[] = {
11736 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11737 0x8e, 0xd0, /* movw ax,ss */
11738 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11739@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11740 * code to that CPU.
11741 */
11742 gdt = get_cpu_gdt_table(0);
11743+
11744+ pax_open_kernel();
11745 set_desc_base(&gdt[APM_CS >> 3],
11746 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11747 set_desc_base(&gdt[APM_CS_16 >> 3],
11748 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11749 set_desc_base(&gdt[APM_DS >> 3],
11750 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11751+ pax_close_kernel();
11752
11753 proc_create("apm", 0, NULL, &apm_file_ops);
11754
11755diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c
11756--- linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11757+++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11758@@ -51,7 +51,6 @@ void foo(void)
11759 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11760 BLANK();
11761
11762- OFFSET(TI_task, thread_info, task);
11763 OFFSET(TI_exec_domain, thread_info, exec_domain);
11764 OFFSET(TI_flags, thread_info, flags);
11765 OFFSET(TI_status, thread_info, status);
11766@@ -60,6 +59,8 @@ void foo(void)
11767 OFFSET(TI_restart_block, thread_info, restart_block);
11768 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11769 OFFSET(TI_cpu, thread_info, cpu);
11770+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11771+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11772 BLANK();
11773
11774 OFFSET(GDS_size, desc_ptr, size);
11775@@ -99,6 +100,7 @@ void foo(void)
11776
11777 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11778 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11779+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11780 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11781 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11782 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11783@@ -115,6 +117,11 @@ void foo(void)
11784 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11785 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11786 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11787+
11788+#ifdef CONFIG_PAX_KERNEXEC
11789+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11790+#endif
11791+
11792 #endif
11793
11794 #ifdef CONFIG_XEN
11795diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c
11796--- linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11797+++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11798@@ -44,6 +44,8 @@ int main(void)
11799 ENTRY(addr_limit);
11800 ENTRY(preempt_count);
11801 ENTRY(status);
11802+ ENTRY(lowest_stack);
11803+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11804 #ifdef CONFIG_IA32_EMULATION
11805 ENTRY(sysenter_return);
11806 #endif
11807@@ -63,6 +65,18 @@ int main(void)
11808 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11809 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11810 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11811+
11812+#ifdef CONFIG_PAX_KERNEXEC
11813+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11814+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11815+#endif
11816+
11817+#ifdef CONFIG_PAX_MEMORY_UDEREF
11818+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11819+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11820+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11821+#endif
11822+
11823 #endif
11824
11825
11826@@ -115,6 +129,7 @@ int main(void)
11827 ENTRY(cr8);
11828 BLANK();
11829 #undef ENTRY
11830+ DEFINE(TSS_size, sizeof(struct tss_struct));
11831 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11832 BLANK();
11833 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11834@@ -130,6 +145,7 @@ int main(void)
11835
11836 BLANK();
11837 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11838+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11839 #ifdef CONFIG_XEN
11840 BLANK();
11841 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11842diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/amd.c
11843--- linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11844+++ linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11845@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11846 unsigned int size)
11847 {
11848 /* AMD errata T13 (order #21922) */
11849- if ((c->x86 == 6)) {
11850+ if (c->x86 == 6) {
11851 /* Duron Rev A0 */
11852 if (c->x86_model == 3 && c->x86_mask == 0)
11853 size = 64;
11854diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/common.c linux-2.6.32.42/arch/x86/kernel/cpu/common.c
11855--- linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11856+++ linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11857@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11858
11859 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11860
11861-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11862-#ifdef CONFIG_X86_64
11863- /*
11864- * We need valid kernel segments for data and code in long mode too
11865- * IRET will check the segment types kkeil 2000/10/28
11866- * Also sysret mandates a special GDT layout
11867- *
11868- * TLS descriptors are currently at a different place compared to i386.
11869- * Hopefully nobody expects them at a fixed place (Wine?)
11870- */
11871- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11872- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11873- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11874- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11875- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11876- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11877-#else
11878- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11879- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11880- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11881- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11882- /*
11883- * Segments used for calling PnP BIOS have byte granularity.
11884- * They code segments and data segments have fixed 64k limits,
11885- * the transfer segment sizes are set at run time.
11886- */
11887- /* 32-bit code */
11888- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11889- /* 16-bit code */
11890- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11891- /* 16-bit data */
11892- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11893- /* 16-bit data */
11894- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11895- /* 16-bit data */
11896- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11897- /*
11898- * The APM segments have byte granularity and their bases
11899- * are set at run time. All have 64k limits.
11900- */
11901- /* 32-bit code */
11902- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11903- /* 16-bit code */
11904- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11905- /* data */
11906- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11907-
11908- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11909- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11910- GDT_STACK_CANARY_INIT
11911-#endif
11912-} };
11913-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11914-
11915 static int __init x86_xsave_setup(char *s)
11916 {
11917 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11918@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11919 {
11920 struct desc_ptr gdt_descr;
11921
11922- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11923+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11924 gdt_descr.size = GDT_SIZE - 1;
11925 load_gdt(&gdt_descr);
11926 /* Reload the per-cpu base */
11927@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11928 /* Filter out anything that depends on CPUID levels we don't have */
11929 filter_cpuid_features(c, true);
11930
11931+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11932+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11933+#endif
11934+
11935 /* If the model name is still unset, do table lookup. */
11936 if (!c->x86_model_id[0]) {
11937 const char *p;
11938@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11939 }
11940 __setup("clearcpuid=", setup_disablecpuid);
11941
11942+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11943+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11944+
11945 #ifdef CONFIG_X86_64
11946 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11947
11948@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11949 EXPORT_PER_CPU_SYMBOL(current_task);
11950
11951 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11952- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11953+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11954 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11955
11956 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11957@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11958 {
11959 memset(regs, 0, sizeof(struct pt_regs));
11960 regs->fs = __KERNEL_PERCPU;
11961- regs->gs = __KERNEL_STACK_CANARY;
11962+ savesegment(gs, regs->gs);
11963
11964 return regs;
11965 }
11966@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11967 int i;
11968
11969 cpu = stack_smp_processor_id();
11970- t = &per_cpu(init_tss, cpu);
11971+ t = init_tss + cpu;
11972 orig_ist = &per_cpu(orig_ist, cpu);
11973
11974 #ifdef CONFIG_NUMA
11975@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11976 switch_to_new_gdt(cpu);
11977 loadsegment(fs, 0);
11978
11979- load_idt((const struct desc_ptr *)&idt_descr);
11980+ load_idt(&idt_descr);
11981
11982 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11983 syscall_init();
11984@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11985 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11986 barrier();
11987
11988- check_efer();
11989 if (cpu != 0)
11990 enable_x2apic();
11991
11992@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11993 {
11994 int cpu = smp_processor_id();
11995 struct task_struct *curr = current;
11996- struct tss_struct *t = &per_cpu(init_tss, cpu);
11997+ struct tss_struct *t = init_tss + cpu;
11998 struct thread_struct *thread = &curr->thread;
11999
12000 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12001diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel.c linux-2.6.32.42/arch/x86/kernel/cpu/intel.c
12002--- linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12003+++ linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12004@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12005 * Update the IDT descriptor and reload the IDT so that
12006 * it uses the read-only mapped virtual address.
12007 */
12008- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12009+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12010 load_idt(&idt_descr);
12011 }
12012 #endif
12013diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c
12014--- linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12015+++ linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12016@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12017 return ret;
12018 }
12019
12020-static struct sysfs_ops sysfs_ops = {
12021+static const struct sysfs_ops sysfs_ops = {
12022 .show = show,
12023 .store = store,
12024 };
12025diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/Makefile linux-2.6.32.42/arch/x86/kernel/cpu/Makefile
12026--- linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12027+++ linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12028@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12029 CFLAGS_REMOVE_common.o = -pg
12030 endif
12031
12032-# Make sure load_percpu_segment has no stackprotector
12033-nostackp := $(call cc-option, -fno-stack-protector)
12034-CFLAGS_common.o := $(nostackp)
12035-
12036 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12037 obj-y += proc.o capflags.o powerflags.o common.o
12038 obj-y += vmware.o hypervisor.o sched.o
12039diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c
12040--- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12041+++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12042@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12043 return ret;
12044 }
12045
12046-static struct sysfs_ops threshold_ops = {
12047+static const struct sysfs_ops threshold_ops = {
12048 .show = show,
12049 .store = store,
12050 };
12051diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c
12052--- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12053+++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12054@@ -43,6 +43,7 @@
12055 #include <asm/ipi.h>
12056 #include <asm/mce.h>
12057 #include <asm/msr.h>
12058+#include <asm/local.h>
12059
12060 #include "mce-internal.h"
12061
12062@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12063 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12064 m->cs, m->ip);
12065
12066- if (m->cs == __KERNEL_CS)
12067+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12068 print_symbol("{%s}", m->ip);
12069 pr_cont("\n");
12070 }
12071@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12072
12073 #define PANIC_TIMEOUT 5 /* 5 seconds */
12074
12075-static atomic_t mce_paniced;
12076+static atomic_unchecked_t mce_paniced;
12077
12078 static int fake_panic;
12079-static atomic_t mce_fake_paniced;
12080+static atomic_unchecked_t mce_fake_paniced;
12081
12082 /* Panic in progress. Enable interrupts and wait for final IPI */
12083 static void wait_for_panic(void)
12084@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12085 /*
12086 * Make sure only one CPU runs in machine check panic
12087 */
12088- if (atomic_inc_return(&mce_paniced) > 1)
12089+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12090 wait_for_panic();
12091 barrier();
12092
12093@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12094 console_verbose();
12095 } else {
12096 /* Don't log too much for fake panic */
12097- if (atomic_inc_return(&mce_fake_paniced) > 1)
12098+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12099 return;
12100 }
12101 print_mce_head();
12102@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12103 * might have been modified by someone else.
12104 */
12105 rmb();
12106- if (atomic_read(&mce_paniced))
12107+ if (atomic_read_unchecked(&mce_paniced))
12108 wait_for_panic();
12109 if (!monarch_timeout)
12110 goto out;
12111@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12112 */
12113
12114 static DEFINE_SPINLOCK(mce_state_lock);
12115-static int open_count; /* #times opened */
12116+static local_t open_count; /* #times opened */
12117 static int open_exclu; /* already open exclusive? */
12118
12119 static int mce_open(struct inode *inode, struct file *file)
12120 {
12121 spin_lock(&mce_state_lock);
12122
12123- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12124+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12125 spin_unlock(&mce_state_lock);
12126
12127 return -EBUSY;
12128@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12129
12130 if (file->f_flags & O_EXCL)
12131 open_exclu = 1;
12132- open_count++;
12133+ local_inc(&open_count);
12134
12135 spin_unlock(&mce_state_lock);
12136
12137@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12138 {
12139 spin_lock(&mce_state_lock);
12140
12141- open_count--;
12142+ local_dec(&open_count);
12143 open_exclu = 0;
12144
12145 spin_unlock(&mce_state_lock);
12146@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12147 static void mce_reset(void)
12148 {
12149 cpu_missing = 0;
12150- atomic_set(&mce_fake_paniced, 0);
12151+ atomic_set_unchecked(&mce_fake_paniced, 0);
12152 atomic_set(&mce_executing, 0);
12153 atomic_set(&mce_callin, 0);
12154 atomic_set(&global_nwo, 0);
12155diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c
12156--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12157+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12158@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12159 return 0;
12160 }
12161
12162-static struct mtrr_ops amd_mtrr_ops = {
12163+static const struct mtrr_ops amd_mtrr_ops = {
12164 .vendor = X86_VENDOR_AMD,
12165 .set = amd_set_mtrr,
12166 .get = amd_get_mtrr,
12167diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c
12168--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12169+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12170@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12171 return 0;
12172 }
12173
12174-static struct mtrr_ops centaur_mtrr_ops = {
12175+static const struct mtrr_ops centaur_mtrr_ops = {
12176 .vendor = X86_VENDOR_CENTAUR,
12177 .set = centaur_set_mcr,
12178 .get = centaur_get_mcr,
12179diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c
12180--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12181+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12182@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12183 post_set();
12184 }
12185
12186-static struct mtrr_ops cyrix_mtrr_ops = {
12187+static const struct mtrr_ops cyrix_mtrr_ops = {
12188 .vendor = X86_VENDOR_CYRIX,
12189 .set_all = cyrix_set_all,
12190 .set = cyrix_set_arr,
12191diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c
12192--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12193+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12194@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12195 /*
12196 * Generic structure...
12197 */
12198-struct mtrr_ops generic_mtrr_ops = {
12199+const struct mtrr_ops generic_mtrr_ops = {
12200 .use_intel_if = 1,
12201 .set_all = generic_set_all,
12202 .get = generic_get_mtrr,
12203diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c
12204--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12205+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12206@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12207 u64 size_or_mask, size_and_mask;
12208 static bool mtrr_aps_delayed_init;
12209
12210-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12211+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12212
12213-struct mtrr_ops *mtrr_if;
12214+const struct mtrr_ops *mtrr_if;
12215
12216 static void set_mtrr(unsigned int reg, unsigned long base,
12217 unsigned long size, mtrr_type type);
12218
12219-void set_mtrr_ops(struct mtrr_ops *ops)
12220+void set_mtrr_ops(const struct mtrr_ops *ops)
12221 {
12222 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12223 mtrr_ops[ops->vendor] = ops;
12224diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h
12225--- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12226+++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12227@@ -12,19 +12,19 @@
12228 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12229
12230 struct mtrr_ops {
12231- u32 vendor;
12232- u32 use_intel_if;
12233- void (*set)(unsigned int reg, unsigned long base,
12234+ const u32 vendor;
12235+ const u32 use_intel_if;
12236+ void (* const set)(unsigned int reg, unsigned long base,
12237 unsigned long size, mtrr_type type);
12238- void (*set_all)(void);
12239+ void (* const set_all)(void);
12240
12241- void (*get)(unsigned int reg, unsigned long *base,
12242+ void (* const get)(unsigned int reg, unsigned long *base,
12243 unsigned long *size, mtrr_type *type);
12244- int (*get_free_region)(unsigned long base, unsigned long size,
12245+ int (* const get_free_region)(unsigned long base, unsigned long size,
12246 int replace_reg);
12247- int (*validate_add_page)(unsigned long base, unsigned long size,
12248+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12249 unsigned int type);
12250- int (*have_wrcomb)(void);
12251+ int (* const have_wrcomb)(void);
12252 };
12253
12254 extern int generic_get_free_region(unsigned long base, unsigned long size,
12255@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12256 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12257 unsigned int type);
12258
12259-extern struct mtrr_ops generic_mtrr_ops;
12260+extern const struct mtrr_ops generic_mtrr_ops;
12261
12262 extern int positive_have_wrcomb(void);
12263
12264@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12265 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12266 void get_mtrr_state(void);
12267
12268-extern void set_mtrr_ops(struct mtrr_ops *ops);
12269+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12270
12271 extern u64 size_or_mask, size_and_mask;
12272-extern struct mtrr_ops *mtrr_if;
12273+extern const struct mtrr_ops *mtrr_if;
12274
12275 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12276 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12277diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c
12278--- linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12279+++ linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12280@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12281
12282 /* Interface defining a CPU specific perfctr watchdog */
12283 struct wd_ops {
12284- int (*reserve)(void);
12285- void (*unreserve)(void);
12286- int (*setup)(unsigned nmi_hz);
12287- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12288- void (*stop)(void);
12289+ int (* const reserve)(void);
12290+ void (* const unreserve)(void);
12291+ int (* const setup)(unsigned nmi_hz);
12292+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12293+ void (* const stop)(void);
12294 unsigned perfctr;
12295 unsigned evntsel;
12296 u64 checkbit;
12297@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12298 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12299 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12300
12301+/* cannot be const */
12302 static struct wd_ops intel_arch_wd_ops;
12303
12304 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12305@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12306 return 1;
12307 }
12308
12309+/* cannot be const */
12310 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12311 .reserve = single_msr_reserve,
12312 .unreserve = single_msr_unreserve,
12313diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c
12314--- linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12315+++ linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12316@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12317 * count to the generic event atomically:
12318 */
12319 again:
12320- prev_raw_count = atomic64_read(&hwc->prev_count);
12321+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12322 rdmsrl(hwc->event_base + idx, new_raw_count);
12323
12324- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12325+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12326 new_raw_count) != prev_raw_count)
12327 goto again;
12328
12329@@ -741,7 +741,7 @@ again:
12330 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12331 delta >>= shift;
12332
12333- atomic64_add(delta, &event->count);
12334+ atomic64_add_unchecked(delta, &event->count);
12335 atomic64_sub(delta, &hwc->period_left);
12336
12337 return new_raw_count;
12338@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12339 * The hw event starts counting from this event offset,
12340 * mark it to be able to extra future deltas:
12341 */
12342- atomic64_set(&hwc->prev_count, (u64)-left);
12343+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12344
12345 err = checking_wrmsrl(hwc->event_base + idx,
12346 (u64)(-left) & x86_pmu.event_mask);
12347@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12348 break;
12349
12350 callchain_store(entry, frame.return_address);
12351- fp = frame.next_frame;
12352+ fp = (__force const void __user *)frame.next_frame;
12353 }
12354 }
12355
12356diff -urNp linux-2.6.32.42/arch/x86/kernel/crash.c linux-2.6.32.42/arch/x86/kernel/crash.c
12357--- linux-2.6.32.42/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12358+++ linux-2.6.32.42/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12359@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12360 regs = args->regs;
12361
12362 #ifdef CONFIG_X86_32
12363- if (!user_mode_vm(regs)) {
12364+ if (!user_mode(regs)) {
12365 crash_fixup_ss_esp(&fixed_regs, regs);
12366 regs = &fixed_regs;
12367 }
12368diff -urNp linux-2.6.32.42/arch/x86/kernel/doublefault_32.c linux-2.6.32.42/arch/x86/kernel/doublefault_32.c
12369--- linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12370+++ linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12371@@ -11,7 +11,7 @@
12372
12373 #define DOUBLEFAULT_STACKSIZE (1024)
12374 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12375-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12376+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12377
12378 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12379
12380@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12381 unsigned long gdt, tss;
12382
12383 store_gdt(&gdt_desc);
12384- gdt = gdt_desc.address;
12385+ gdt = (unsigned long)gdt_desc.address;
12386
12387 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12388
12389@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12390 /* 0x2 bit is always set */
12391 .flags = X86_EFLAGS_SF | 0x2,
12392 .sp = STACK_START,
12393- .es = __USER_DS,
12394+ .es = __KERNEL_DS,
12395 .cs = __KERNEL_CS,
12396 .ss = __KERNEL_DS,
12397- .ds = __USER_DS,
12398+ .ds = __KERNEL_DS,
12399 .fs = __KERNEL_PERCPU,
12400
12401 .__cr3 = __pa_nodebug(swapper_pg_dir),
12402diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c
12403--- linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12404+++ linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12405@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12406 #endif
12407
12408 for (;;) {
12409- struct thread_info *context;
12410+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12411+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12412
12413- context = (struct thread_info *)
12414- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12415- bp = print_context_stack(context, stack, bp, ops,
12416- data, NULL, &graph);
12417-
12418- stack = (unsigned long *)context->previous_esp;
12419- if (!stack)
12420+ if (stack_start == task_stack_page(task))
12421 break;
12422+ stack = *(unsigned long **)stack_start;
12423 if (ops->stack(data, "IRQ") < 0)
12424 break;
12425 touch_nmi_watchdog();
12426@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12427 * When in-kernel, we also print out the stack and code at the
12428 * time of the fault..
12429 */
12430- if (!user_mode_vm(regs)) {
12431+ if (!user_mode(regs)) {
12432 unsigned int code_prologue = code_bytes * 43 / 64;
12433 unsigned int code_len = code_bytes;
12434 unsigned char c;
12435 u8 *ip;
12436+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12437
12438 printk(KERN_EMERG "Stack:\n");
12439 show_stack_log_lvl(NULL, regs, &regs->sp,
12440@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12441
12442 printk(KERN_EMERG "Code: ");
12443
12444- ip = (u8 *)regs->ip - code_prologue;
12445+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12446 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12447 /* try starting at IP */
12448- ip = (u8 *)regs->ip;
12449+ ip = (u8 *)regs->ip + cs_base;
12450 code_len = code_len - code_prologue + 1;
12451 }
12452 for (i = 0; i < code_len; i++, ip++) {
12453@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12454 printk(" Bad EIP value.");
12455 break;
12456 }
12457- if (ip == (u8 *)regs->ip)
12458+ if (ip == (u8 *)regs->ip + cs_base)
12459 printk("<%02x> ", c);
12460 else
12461 printk("%02x ", c);
12462@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12463 {
12464 unsigned short ud2;
12465
12466+ ip = ktla_ktva(ip);
12467 if (ip < PAGE_OFFSET)
12468 return 0;
12469 if (probe_kernel_address((unsigned short *)ip, ud2))
12470diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c
12471--- linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12472+++ linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12473@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12474 unsigned long *irq_stack_end =
12475 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12476 unsigned used = 0;
12477- struct thread_info *tinfo;
12478 int graph = 0;
12479+ void *stack_start;
12480
12481 if (!task)
12482 task = current;
12483@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12484 * current stack address. If the stacks consist of nested
12485 * exceptions
12486 */
12487- tinfo = task_thread_info(task);
12488 for (;;) {
12489 char *id;
12490 unsigned long *estack_end;
12491+
12492 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12493 &used, &id);
12494
12495@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12496 if (ops->stack(data, id) < 0)
12497 break;
12498
12499- bp = print_context_stack(tinfo, stack, bp, ops,
12500+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12501 data, estack_end, &graph);
12502 ops->stack(data, "<EOE>");
12503 /*
12504@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12505 if (stack >= irq_stack && stack < irq_stack_end) {
12506 if (ops->stack(data, "IRQ") < 0)
12507 break;
12508- bp = print_context_stack(tinfo, stack, bp,
12509+ bp = print_context_stack(task, irq_stack, stack, bp,
12510 ops, data, irq_stack_end, &graph);
12511 /*
12512 * We link to the next stack (which would be
12513@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12514 /*
12515 * This handles the process stack:
12516 */
12517- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12518+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12519+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12520 put_cpu();
12521 }
12522 EXPORT_SYMBOL(dump_trace);
12523diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.c linux-2.6.32.42/arch/x86/kernel/dumpstack.c
12524--- linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12525+++ linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12526@@ -2,6 +2,9 @@
12527 * Copyright (C) 1991, 1992 Linus Torvalds
12528 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12529 */
12530+#ifdef CONFIG_GRKERNSEC_HIDESYM
12531+#define __INCLUDED_BY_HIDESYM 1
12532+#endif
12533 #include <linux/kallsyms.h>
12534 #include <linux/kprobes.h>
12535 #include <linux/uaccess.h>
12536@@ -28,7 +31,7 @@ static int die_counter;
12537
12538 void printk_address(unsigned long address, int reliable)
12539 {
12540- printk(" [<%p>] %s%pS\n", (void *) address,
12541+ printk(" [<%p>] %s%pA\n", (void *) address,
12542 reliable ? "" : "? ", (void *) address);
12543 }
12544
12545@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12546 static void
12547 print_ftrace_graph_addr(unsigned long addr, void *data,
12548 const struct stacktrace_ops *ops,
12549- struct thread_info *tinfo, int *graph)
12550+ struct task_struct *task, int *graph)
12551 {
12552- struct task_struct *task = tinfo->task;
12553 unsigned long ret_addr;
12554 int index = task->curr_ret_stack;
12555
12556@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12557 static inline void
12558 print_ftrace_graph_addr(unsigned long addr, void *data,
12559 const struct stacktrace_ops *ops,
12560- struct thread_info *tinfo, int *graph)
12561+ struct task_struct *task, int *graph)
12562 { }
12563 #endif
12564
12565@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12566 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12567 */
12568
12569-static inline int valid_stack_ptr(struct thread_info *tinfo,
12570- void *p, unsigned int size, void *end)
12571+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12572 {
12573- void *t = tinfo;
12574 if (end) {
12575 if (p < end && p >= (end-THREAD_SIZE))
12576 return 1;
12577@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12578 }
12579
12580 unsigned long
12581-print_context_stack(struct thread_info *tinfo,
12582+print_context_stack(struct task_struct *task, void *stack_start,
12583 unsigned long *stack, unsigned long bp,
12584 const struct stacktrace_ops *ops, void *data,
12585 unsigned long *end, int *graph)
12586 {
12587 struct stack_frame *frame = (struct stack_frame *)bp;
12588
12589- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12590+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12591 unsigned long addr;
12592
12593 addr = *stack;
12594@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12595 } else {
12596 ops->address(data, addr, 0);
12597 }
12598- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12599+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12600 }
12601 stack++;
12602 }
12603@@ -180,7 +180,7 @@ void dump_stack(void)
12604 #endif
12605
12606 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12607- current->pid, current->comm, print_tainted(),
12608+ task_pid_nr(current), current->comm, print_tainted(),
12609 init_utsname()->release,
12610 (int)strcspn(init_utsname()->version, " "),
12611 init_utsname()->version);
12612@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12613 return flags;
12614 }
12615
12616+extern void gr_handle_kernel_exploit(void);
12617+
12618 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12619 {
12620 if (regs && kexec_should_crash(current))
12621@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12622 panic("Fatal exception in interrupt");
12623 if (panic_on_oops)
12624 panic("Fatal exception");
12625- do_exit(signr);
12626+
12627+ gr_handle_kernel_exploit();
12628+
12629+ do_group_exit(signr);
12630 }
12631
12632 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12633@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12634 unsigned long flags = oops_begin();
12635 int sig = SIGSEGV;
12636
12637- if (!user_mode_vm(regs))
12638+ if (!user_mode(regs))
12639 report_bug(regs->ip, regs);
12640
12641 if (__die(str, regs, err))
12642diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.h linux-2.6.32.42/arch/x86/kernel/dumpstack.h
12643--- linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12644+++ linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12645@@ -15,7 +15,7 @@
12646 #endif
12647
12648 extern unsigned long
12649-print_context_stack(struct thread_info *tinfo,
12650+print_context_stack(struct task_struct *task, void *stack_start,
12651 unsigned long *stack, unsigned long bp,
12652 const struct stacktrace_ops *ops, void *data,
12653 unsigned long *end, int *graph);
12654diff -urNp linux-2.6.32.42/arch/x86/kernel/e820.c linux-2.6.32.42/arch/x86/kernel/e820.c
12655--- linux-2.6.32.42/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12656+++ linux-2.6.32.42/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12657@@ -733,7 +733,7 @@ struct early_res {
12658 };
12659 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12660 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12661- {}
12662+ { 0, 0, {0}, 0 }
12663 };
12664
12665 static int __init find_overlapped_early(u64 start, u64 end)
12666diff -urNp linux-2.6.32.42/arch/x86/kernel/early_printk.c linux-2.6.32.42/arch/x86/kernel/early_printk.c
12667--- linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12668+++ linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12669@@ -7,6 +7,7 @@
12670 #include <linux/pci_regs.h>
12671 #include <linux/pci_ids.h>
12672 #include <linux/errno.h>
12673+#include <linux/sched.h>
12674 #include <asm/io.h>
12675 #include <asm/processor.h>
12676 #include <asm/fcntl.h>
12677@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12678 int n;
12679 va_list ap;
12680
12681+ pax_track_stack();
12682+
12683 va_start(ap, fmt);
12684 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12685 early_console->write(early_console, buf, n);
12686diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_32.c linux-2.6.32.42/arch/x86/kernel/efi_32.c
12687--- linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12688+++ linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12689@@ -38,70 +38,38 @@
12690 */
12691
12692 static unsigned long efi_rt_eflags;
12693-static pgd_t efi_bak_pg_dir_pointer[2];
12694+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12695
12696-void efi_call_phys_prelog(void)
12697+void __init efi_call_phys_prelog(void)
12698 {
12699- unsigned long cr4;
12700- unsigned long temp;
12701 struct desc_ptr gdt_descr;
12702
12703 local_irq_save(efi_rt_eflags);
12704
12705- /*
12706- * If I don't have PAE, I should just duplicate two entries in page
12707- * directory. If I have PAE, I just need to duplicate one entry in
12708- * page directory.
12709- */
12710- cr4 = read_cr4_safe();
12711
12712- if (cr4 & X86_CR4_PAE) {
12713- efi_bak_pg_dir_pointer[0].pgd =
12714- swapper_pg_dir[pgd_index(0)].pgd;
12715- swapper_pg_dir[0].pgd =
12716- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12717- } else {
12718- efi_bak_pg_dir_pointer[0].pgd =
12719- swapper_pg_dir[pgd_index(0)].pgd;
12720- efi_bak_pg_dir_pointer[1].pgd =
12721- swapper_pg_dir[pgd_index(0x400000)].pgd;
12722- swapper_pg_dir[pgd_index(0)].pgd =
12723- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12724- temp = PAGE_OFFSET + 0x400000;
12725- swapper_pg_dir[pgd_index(0x400000)].pgd =
12726- swapper_pg_dir[pgd_index(temp)].pgd;
12727- }
12728+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12729+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12730+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12731
12732 /*
12733 * After the lock is released, the original page table is restored.
12734 */
12735 __flush_tlb_all();
12736
12737- gdt_descr.address = __pa(get_cpu_gdt_table(0));
12738+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12739 gdt_descr.size = GDT_SIZE - 1;
12740 load_gdt(&gdt_descr);
12741 }
12742
12743-void efi_call_phys_epilog(void)
12744+void __init efi_call_phys_epilog(void)
12745 {
12746- unsigned long cr4;
12747 struct desc_ptr gdt_descr;
12748
12749- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12750+ gdt_descr.address = get_cpu_gdt_table(0);
12751 gdt_descr.size = GDT_SIZE - 1;
12752 load_gdt(&gdt_descr);
12753
12754- cr4 = read_cr4_safe();
12755-
12756- if (cr4 & X86_CR4_PAE) {
12757- swapper_pg_dir[pgd_index(0)].pgd =
12758- efi_bak_pg_dir_pointer[0].pgd;
12759- } else {
12760- swapper_pg_dir[pgd_index(0)].pgd =
12761- efi_bak_pg_dir_pointer[0].pgd;
12762- swapper_pg_dir[pgd_index(0x400000)].pgd =
12763- efi_bak_pg_dir_pointer[1].pgd;
12764- }
12765+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12766
12767 /*
12768 * After the lock is released, the original page table is restored.
12769diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S
12770--- linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12771+++ linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12772@@ -6,6 +6,7 @@
12773 */
12774
12775 #include <linux/linkage.h>
12776+#include <linux/init.h>
12777 #include <asm/page_types.h>
12778
12779 /*
12780@@ -20,7 +21,7 @@
12781 * service functions will comply with gcc calling convention, too.
12782 */
12783
12784-.text
12785+__INIT
12786 ENTRY(efi_call_phys)
12787 /*
12788 * 0. The function can only be called in Linux kernel. So CS has been
12789@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12790 * The mapping of lower virtual memory has been created in prelog and
12791 * epilog.
12792 */
12793- movl $1f, %edx
12794- subl $__PAGE_OFFSET, %edx
12795- jmp *%edx
12796+ jmp 1f-__PAGE_OFFSET
12797 1:
12798
12799 /*
12800@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12801 * parameter 2, ..., param n. To make things easy, we save the return
12802 * address of efi_call_phys in a global variable.
12803 */
12804- popl %edx
12805- movl %edx, saved_return_addr
12806- /* get the function pointer into ECX*/
12807- popl %ecx
12808- movl %ecx, efi_rt_function_ptr
12809- movl $2f, %edx
12810- subl $__PAGE_OFFSET, %edx
12811- pushl %edx
12812+ popl (saved_return_addr)
12813+ popl (efi_rt_function_ptr)
12814
12815 /*
12816 * 3. Clear PG bit in %CR0.
12817@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12818 /*
12819 * 5. Call the physical function.
12820 */
12821- jmp *%ecx
12822+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
12823
12824-2:
12825 /*
12826 * 6. After EFI runtime service returns, control will return to
12827 * following instruction. We'd better readjust stack pointer first.
12828@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12829 movl %cr0, %edx
12830 orl $0x80000000, %edx
12831 movl %edx, %cr0
12832- jmp 1f
12833-1:
12834+
12835 /*
12836 * 8. Now restore the virtual mode from flat mode by
12837 * adding EIP with PAGE_OFFSET.
12838 */
12839- movl $1f, %edx
12840- jmp *%edx
12841+ jmp 1f+__PAGE_OFFSET
12842 1:
12843
12844 /*
12845 * 9. Balance the stack. And because EAX contain the return value,
12846 * we'd better not clobber it.
12847 */
12848- leal efi_rt_function_ptr, %edx
12849- movl (%edx), %ecx
12850- pushl %ecx
12851+ pushl (efi_rt_function_ptr)
12852
12853 /*
12854- * 10. Push the saved return address onto the stack and return.
12855+ * 10. Return to the saved return address.
12856 */
12857- leal saved_return_addr, %edx
12858- movl (%edx), %ecx
12859- pushl %ecx
12860- ret
12861+ jmpl *(saved_return_addr)
12862 ENDPROC(efi_call_phys)
12863 .previous
12864
12865-.data
12866+__INITDATA
12867 saved_return_addr:
12868 .long 0
12869 efi_rt_function_ptr:
12870diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_32.S linux-2.6.32.42/arch/x86/kernel/entry_32.S
12871--- linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12872+++ linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12873@@ -185,13 +185,146 @@
12874 /*CFI_REL_OFFSET gs, PT_GS*/
12875 .endm
12876 .macro SET_KERNEL_GS reg
12877+
12878+#ifdef CONFIG_CC_STACKPROTECTOR
12879 movl $(__KERNEL_STACK_CANARY), \reg
12880+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12881+ movl $(__USER_DS), \reg
12882+#else
12883+ xorl \reg, \reg
12884+#endif
12885+
12886 movl \reg, %gs
12887 .endm
12888
12889 #endif /* CONFIG_X86_32_LAZY_GS */
12890
12891-.macro SAVE_ALL
12892+.macro pax_enter_kernel
12893+#ifdef CONFIG_PAX_KERNEXEC
12894+ call pax_enter_kernel
12895+#endif
12896+.endm
12897+
12898+.macro pax_exit_kernel
12899+#ifdef CONFIG_PAX_KERNEXEC
12900+ call pax_exit_kernel
12901+#endif
12902+.endm
12903+
12904+#ifdef CONFIG_PAX_KERNEXEC
12905+ENTRY(pax_enter_kernel)
12906+#ifdef CONFIG_PARAVIRT
12907+ pushl %eax
12908+ pushl %ecx
12909+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12910+ mov %eax, %esi
12911+#else
12912+ mov %cr0, %esi
12913+#endif
12914+ bts $16, %esi
12915+ jnc 1f
12916+ mov %cs, %esi
12917+ cmp $__KERNEL_CS, %esi
12918+ jz 3f
12919+ ljmp $__KERNEL_CS, $3f
12920+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12921+2:
12922+#ifdef CONFIG_PARAVIRT
12923+ mov %esi, %eax
12924+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12925+#else
12926+ mov %esi, %cr0
12927+#endif
12928+3:
12929+#ifdef CONFIG_PARAVIRT
12930+ popl %ecx
12931+ popl %eax
12932+#endif
12933+ ret
12934+ENDPROC(pax_enter_kernel)
12935+
12936+ENTRY(pax_exit_kernel)
12937+#ifdef CONFIG_PARAVIRT
12938+ pushl %eax
12939+ pushl %ecx
12940+#endif
12941+ mov %cs, %esi
12942+ cmp $__KERNEXEC_KERNEL_CS, %esi
12943+ jnz 2f
12944+#ifdef CONFIG_PARAVIRT
12945+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12946+ mov %eax, %esi
12947+#else
12948+ mov %cr0, %esi
12949+#endif
12950+ btr $16, %esi
12951+ ljmp $__KERNEL_CS, $1f
12952+1:
12953+#ifdef CONFIG_PARAVIRT
12954+ mov %esi, %eax
12955+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12956+#else
12957+ mov %esi, %cr0
12958+#endif
12959+2:
12960+#ifdef CONFIG_PARAVIRT
12961+ popl %ecx
12962+ popl %eax
12963+#endif
12964+ ret
12965+ENDPROC(pax_exit_kernel)
12966+#endif
12967+
12968+.macro pax_erase_kstack
12969+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12970+ call pax_erase_kstack
12971+#endif
12972+.endm
12973+
12974+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12975+/*
12976+ * ebp: thread_info
12977+ * ecx, edx: can be clobbered
12978+ */
12979+ENTRY(pax_erase_kstack)
12980+ pushl %edi
12981+ pushl %eax
12982+
12983+ mov TI_lowest_stack(%ebp), %edi
12984+ mov $-0xBEEF, %eax
12985+ std
12986+
12987+1: mov %edi, %ecx
12988+ and $THREAD_SIZE_asm - 1, %ecx
12989+ shr $2, %ecx
12990+ repne scasl
12991+ jecxz 2f
12992+
12993+ cmp $2*16, %ecx
12994+ jc 2f
12995+
12996+ mov $2*16, %ecx
12997+ repe scasl
12998+ jecxz 2f
12999+ jne 1b
13000+
13001+2: cld
13002+ mov %esp, %ecx
13003+ sub %edi, %ecx
13004+ shr $2, %ecx
13005+ rep stosl
13006+
13007+ mov TI_task_thread_sp0(%ebp), %edi
13008+ sub $128, %edi
13009+ mov %edi, TI_lowest_stack(%ebp)
13010+
13011+ popl %eax
13012+ popl %edi
13013+ ret
13014+ENDPROC(pax_erase_kstack)
13015+#endif
13016+
13017+.macro __SAVE_ALL _DS
13018 cld
13019 PUSH_GS
13020 pushl %fs
13021@@ -224,7 +357,7 @@
13022 pushl %ebx
13023 CFI_ADJUST_CFA_OFFSET 4
13024 CFI_REL_OFFSET ebx, 0
13025- movl $(__USER_DS), %edx
13026+ movl $\_DS, %edx
13027 movl %edx, %ds
13028 movl %edx, %es
13029 movl $(__KERNEL_PERCPU), %edx
13030@@ -232,6 +365,15 @@
13031 SET_KERNEL_GS %edx
13032 .endm
13033
13034+.macro SAVE_ALL
13035+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13036+ __SAVE_ALL __KERNEL_DS
13037+ pax_enter_kernel
13038+#else
13039+ __SAVE_ALL __USER_DS
13040+#endif
13041+.endm
13042+
13043 .macro RESTORE_INT_REGS
13044 popl %ebx
13045 CFI_ADJUST_CFA_OFFSET -4
13046@@ -352,7 +494,15 @@ check_userspace:
13047 movb PT_CS(%esp), %al
13048 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13049 cmpl $USER_RPL, %eax
13050+
13051+#ifdef CONFIG_PAX_KERNEXEC
13052+ jae resume_userspace
13053+
13054+ PAX_EXIT_KERNEL
13055+ jmp resume_kernel
13056+#else
13057 jb resume_kernel # not returning to v8086 or userspace
13058+#endif
13059
13060 ENTRY(resume_userspace)
13061 LOCKDEP_SYS_EXIT
13062@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13063 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13064 # int/exception return?
13065 jne work_pending
13066- jmp restore_all
13067+ jmp restore_all_pax
13068 END(ret_from_exception)
13069
13070 #ifdef CONFIG_PREEMPT
13071@@ -414,25 +564,36 @@ sysenter_past_esp:
13072 /*CFI_REL_OFFSET cs, 0*/
13073 /*
13074 * Push current_thread_info()->sysenter_return to the stack.
13075- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13076- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13077 */
13078- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13079+ pushl $0
13080 CFI_ADJUST_CFA_OFFSET 4
13081 CFI_REL_OFFSET eip, 0
13082
13083 pushl %eax
13084 CFI_ADJUST_CFA_OFFSET 4
13085 SAVE_ALL
13086+ GET_THREAD_INFO(%ebp)
13087+ movl TI_sysenter_return(%ebp),%ebp
13088+ movl %ebp,PT_EIP(%esp)
13089 ENABLE_INTERRUPTS(CLBR_NONE)
13090
13091 /*
13092 * Load the potential sixth argument from user stack.
13093 * Careful about security.
13094 */
13095+ movl PT_OLDESP(%esp),%ebp
13096+
13097+#ifdef CONFIG_PAX_MEMORY_UDEREF
13098+ mov PT_OLDSS(%esp),%ds
13099+1: movl %ds:(%ebp),%ebp
13100+ push %ss
13101+ pop %ds
13102+#else
13103 cmpl $__PAGE_OFFSET-3,%ebp
13104 jae syscall_fault
13105 1: movl (%ebp),%ebp
13106+#endif
13107+
13108 movl %ebp,PT_EBP(%esp)
13109 .section __ex_table,"a"
13110 .align 4
13111@@ -455,12 +616,23 @@ sysenter_do_call:
13112 testl $_TIF_ALLWORK_MASK, %ecx
13113 jne sysexit_audit
13114 sysenter_exit:
13115+
13116+#ifdef CONFIG_PAX_RANDKSTACK
13117+ pushl_cfi %eax
13118+ call pax_randomize_kstack
13119+ popl_cfi %eax
13120+#endif
13121+
13122+ pax_erase_kstack
13123+
13124 /* if something modifies registers it must also disable sysexit */
13125 movl PT_EIP(%esp), %edx
13126 movl PT_OLDESP(%esp), %ecx
13127 xorl %ebp,%ebp
13128 TRACE_IRQS_ON
13129 1: mov PT_FS(%esp), %fs
13130+2: mov PT_DS(%esp), %ds
13131+3: mov PT_ES(%esp), %es
13132 PTGS_TO_GS
13133 ENABLE_INTERRUPTS_SYSEXIT
13134
13135@@ -477,6 +649,9 @@ sysenter_audit:
13136 movl %eax,%edx /* 2nd arg: syscall number */
13137 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13138 call audit_syscall_entry
13139+
13140+ pax_erase_kstack
13141+
13142 pushl %ebx
13143 CFI_ADJUST_CFA_OFFSET 4
13144 movl PT_EAX(%esp),%eax /* reload syscall number */
13145@@ -504,11 +679,17 @@ sysexit_audit:
13146
13147 CFI_ENDPROC
13148 .pushsection .fixup,"ax"
13149-2: movl $0,PT_FS(%esp)
13150+4: movl $0,PT_FS(%esp)
13151+ jmp 1b
13152+5: movl $0,PT_DS(%esp)
13153+ jmp 1b
13154+6: movl $0,PT_ES(%esp)
13155 jmp 1b
13156 .section __ex_table,"a"
13157 .align 4
13158- .long 1b,2b
13159+ .long 1b,4b
13160+ .long 2b,5b
13161+ .long 3b,6b
13162 .popsection
13163 PTGS_TO_GS_EX
13164 ENDPROC(ia32_sysenter_target)
13165@@ -538,6 +719,14 @@ syscall_exit:
13166 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13167 jne syscall_exit_work
13168
13169+restore_all_pax:
13170+
13171+#ifdef CONFIG_PAX_RANDKSTACK
13172+ call pax_randomize_kstack
13173+#endif
13174+
13175+ pax_erase_kstack
13176+
13177 restore_all:
13178 TRACE_IRQS_IRET
13179 restore_all_notrace:
13180@@ -602,7 +791,13 @@ ldt_ss:
13181 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13182 mov %dx, %ax /* eax: new kernel esp */
13183 sub %eax, %edx /* offset (low word is 0) */
13184- PER_CPU(gdt_page, %ebx)
13185+#ifdef CONFIG_SMP
13186+ movl PER_CPU_VAR(cpu_number), %ebx
13187+ shll $PAGE_SHIFT_asm, %ebx
13188+ addl $cpu_gdt_table, %ebx
13189+#else
13190+ movl $cpu_gdt_table, %ebx
13191+#endif
13192 shr $16, %edx
13193 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13194 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13195@@ -636,31 +831,25 @@ work_resched:
13196 movl TI_flags(%ebp), %ecx
13197 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13198 # than syscall tracing?
13199- jz restore_all
13200+ jz restore_all_pax
13201 testb $_TIF_NEED_RESCHED, %cl
13202 jnz work_resched
13203
13204 work_notifysig: # deal with pending signals and
13205 # notify-resume requests
13206+ movl %esp, %eax
13207 #ifdef CONFIG_VM86
13208 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13209- movl %esp, %eax
13210- jne work_notifysig_v86 # returning to kernel-space or
13211+ jz 1f # returning to kernel-space or
13212 # vm86-space
13213- xorl %edx, %edx
13214- call do_notify_resume
13215- jmp resume_userspace_sig
13216
13217- ALIGN
13218-work_notifysig_v86:
13219 pushl %ecx # save ti_flags for do_notify_resume
13220 CFI_ADJUST_CFA_OFFSET 4
13221 call save_v86_state # %eax contains pt_regs pointer
13222 popl %ecx
13223 CFI_ADJUST_CFA_OFFSET -4
13224 movl %eax, %esp
13225-#else
13226- movl %esp, %eax
13227+1:
13228 #endif
13229 xorl %edx, %edx
13230 call do_notify_resume
13231@@ -673,6 +862,9 @@ syscall_trace_entry:
13232 movl $-ENOSYS,PT_EAX(%esp)
13233 movl %esp, %eax
13234 call syscall_trace_enter
13235+
13236+ pax_erase_kstack
13237+
13238 /* What it returned is what we'll actually use. */
13239 cmpl $(nr_syscalls), %eax
13240 jnae syscall_call
13241@@ -695,6 +887,10 @@ END(syscall_exit_work)
13242
13243 RING0_INT_FRAME # can't unwind into user space anyway
13244 syscall_fault:
13245+#ifdef CONFIG_PAX_MEMORY_UDEREF
13246+ push %ss
13247+ pop %ds
13248+#endif
13249 GET_THREAD_INFO(%ebp)
13250 movl $-EFAULT,PT_EAX(%esp)
13251 jmp resume_userspace
13252@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13253 PTREGSCALL(vm86)
13254 PTREGSCALL(vm86old)
13255
13256+ ALIGN;
13257+ENTRY(kernel_execve)
13258+ push %ebp
13259+ sub $PT_OLDSS+4,%esp
13260+ push %edi
13261+ push %ecx
13262+ push %eax
13263+ lea 3*4(%esp),%edi
13264+ mov $PT_OLDSS/4+1,%ecx
13265+ xorl %eax,%eax
13266+ rep stosl
13267+ pop %eax
13268+ pop %ecx
13269+ pop %edi
13270+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13271+ mov %eax,PT_EBX(%esp)
13272+ mov %edx,PT_ECX(%esp)
13273+ mov %ecx,PT_EDX(%esp)
13274+ mov %esp,%eax
13275+ call sys_execve
13276+ GET_THREAD_INFO(%ebp)
13277+ test %eax,%eax
13278+ jz syscall_exit
13279+ add $PT_OLDSS+4,%esp
13280+ pop %ebp
13281+ ret
13282+
13283 .macro FIXUP_ESPFIX_STACK
13284 /*
13285 * Switch back for ESPFIX stack to the normal zerobased stack
13286@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13287 * normal stack and adjusts ESP with the matching offset.
13288 */
13289 /* fixup the stack */
13290- PER_CPU(gdt_page, %ebx)
13291+#ifdef CONFIG_SMP
13292+ movl PER_CPU_VAR(cpu_number), %ebx
13293+ shll $PAGE_SHIFT_asm, %ebx
13294+ addl $cpu_gdt_table, %ebx
13295+#else
13296+ movl $cpu_gdt_table, %ebx
13297+#endif
13298 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13299 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13300 shl $16, %eax
13301@@ -1198,7 +1427,6 @@ return_to_handler:
13302 ret
13303 #endif
13304
13305-.section .rodata,"a"
13306 #include "syscall_table_32.S"
13307
13308 syscall_table_size=(.-sys_call_table)
13309@@ -1255,9 +1483,12 @@ error_code:
13310 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13311 REG_TO_PTGS %ecx
13312 SET_KERNEL_GS %ecx
13313- movl $(__USER_DS), %ecx
13314+ movl $(__KERNEL_DS), %ecx
13315 movl %ecx, %ds
13316 movl %ecx, %es
13317+
13318+ pax_enter_kernel
13319+
13320 TRACE_IRQS_OFF
13321 movl %esp,%eax # pt_regs pointer
13322 call *%edi
13323@@ -1351,6 +1582,9 @@ nmi_stack_correct:
13324 xorl %edx,%edx # zero error code
13325 movl %esp,%eax # pt_regs pointer
13326 call do_nmi
13327+
13328+ pax_exit_kernel
13329+
13330 jmp restore_all_notrace
13331 CFI_ENDPROC
13332
13333@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13334 FIXUP_ESPFIX_STACK # %eax == %esp
13335 xorl %edx,%edx # zero error code
13336 call do_nmi
13337+
13338+ pax_exit_kernel
13339+
13340 RESTORE_REGS
13341 lss 12+4(%esp), %esp # back to espfix stack
13342 CFI_ADJUST_CFA_OFFSET -24
13343diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_64.S linux-2.6.32.42/arch/x86/kernel/entry_64.S
13344--- linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13345+++ linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13346@@ -53,6 +53,7 @@
13347 #include <asm/paravirt.h>
13348 #include <asm/ftrace.h>
13349 #include <asm/percpu.h>
13350+#include <asm/pgtable.h>
13351
13352 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13353 #include <linux/elf-em.h>
13354@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13355 ENDPROC(native_usergs_sysret64)
13356 #endif /* CONFIG_PARAVIRT */
13357
13358+ .macro ljmpq sel, off
13359+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13360+ .byte 0x48; ljmp *1234f(%rip)
13361+ .pushsection .rodata
13362+ .align 16
13363+ 1234: .quad \off; .word \sel
13364+ .popsection
13365+#else
13366+ pushq $\sel
13367+ pushq $\off
13368+ lretq
13369+#endif
13370+ .endm
13371+
13372+ .macro pax_enter_kernel
13373+#ifdef CONFIG_PAX_KERNEXEC
13374+ call pax_enter_kernel
13375+#endif
13376+ .endm
13377+
13378+ .macro pax_exit_kernel
13379+#ifdef CONFIG_PAX_KERNEXEC
13380+ call pax_exit_kernel
13381+#endif
13382+ .endm
13383+
13384+#ifdef CONFIG_PAX_KERNEXEC
13385+ENTRY(pax_enter_kernel)
13386+ pushq %rdi
13387+
13388+#ifdef CONFIG_PARAVIRT
13389+ PV_SAVE_REGS(CLBR_RDI)
13390+#endif
13391+
13392+ GET_CR0_INTO_RDI
13393+ bts $16,%rdi
13394+ jnc 1f
13395+ mov %cs,%edi
13396+ cmp $__KERNEL_CS,%edi
13397+ jz 3f
13398+ ljmpq __KERNEL_CS,3f
13399+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13400+2: SET_RDI_INTO_CR0
13401+3:
13402+
13403+#ifdef CONFIG_PARAVIRT
13404+ PV_RESTORE_REGS(CLBR_RDI)
13405+#endif
13406+
13407+ popq %rdi
13408+ retq
13409+ENDPROC(pax_enter_kernel)
13410+
13411+ENTRY(pax_exit_kernel)
13412+ pushq %rdi
13413+
13414+#ifdef CONFIG_PARAVIRT
13415+ PV_SAVE_REGS(CLBR_RDI)
13416+#endif
13417+
13418+ mov %cs,%rdi
13419+ cmp $__KERNEXEC_KERNEL_CS,%edi
13420+ jnz 2f
13421+ GET_CR0_INTO_RDI
13422+ btr $16,%rdi
13423+ ljmpq __KERNEL_CS,1f
13424+1: SET_RDI_INTO_CR0
13425+2:
13426+
13427+#ifdef CONFIG_PARAVIRT
13428+ PV_RESTORE_REGS(CLBR_RDI);
13429+#endif
13430+
13431+ popq %rdi
13432+ retq
13433+ENDPROC(pax_exit_kernel)
13434+#endif
13435+
13436+ .macro pax_enter_kernel_user
13437+#ifdef CONFIG_PAX_MEMORY_UDEREF
13438+ call pax_enter_kernel_user
13439+#endif
13440+ .endm
13441+
13442+ .macro pax_exit_kernel_user
13443+#ifdef CONFIG_PAX_MEMORY_UDEREF
13444+ call pax_exit_kernel_user
13445+#endif
13446+#ifdef CONFIG_PAX_RANDKSTACK
13447+ push %rax
13448+ call pax_randomize_kstack
13449+ pop %rax
13450+#endif
13451+ pax_erase_kstack
13452+ .endm
13453+
13454+#ifdef CONFIG_PAX_MEMORY_UDEREF
13455+ENTRY(pax_enter_kernel_user)
13456+ pushq %rdi
13457+ pushq %rbx
13458+
13459+#ifdef CONFIG_PARAVIRT
13460+ PV_SAVE_REGS(CLBR_RDI)
13461+#endif
13462+
13463+ GET_CR3_INTO_RDI
13464+ mov %rdi,%rbx
13465+ add $__START_KERNEL_map,%rbx
13466+ sub phys_base(%rip),%rbx
13467+
13468+#ifdef CONFIG_PARAVIRT
13469+ pushq %rdi
13470+ cmpl $0, pv_info+PARAVIRT_enabled
13471+ jz 1f
13472+ i = 0
13473+ .rept USER_PGD_PTRS
13474+ mov i*8(%rbx),%rsi
13475+ mov $0,%sil
13476+ lea i*8(%rbx),%rdi
13477+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13478+ i = i + 1
13479+ .endr
13480+ jmp 2f
13481+1:
13482+#endif
13483+
13484+ i = 0
13485+ .rept USER_PGD_PTRS
13486+ movb $0,i*8(%rbx)
13487+ i = i + 1
13488+ .endr
13489+
13490+#ifdef CONFIG_PARAVIRT
13491+2: popq %rdi
13492+#endif
13493+ SET_RDI_INTO_CR3
13494+
13495+#ifdef CONFIG_PAX_KERNEXEC
13496+ GET_CR0_INTO_RDI
13497+ bts $16,%rdi
13498+ SET_RDI_INTO_CR0
13499+#endif
13500+
13501+#ifdef CONFIG_PARAVIRT
13502+ PV_RESTORE_REGS(CLBR_RDI)
13503+#endif
13504+
13505+ popq %rbx
13506+ popq %rdi
13507+ retq
13508+ENDPROC(pax_enter_kernel_user)
13509+
13510+ENTRY(pax_exit_kernel_user)
13511+ push %rdi
13512+
13513+#ifdef CONFIG_PARAVIRT
13514+ pushq %rbx
13515+ PV_SAVE_REGS(CLBR_RDI)
13516+#endif
13517+
13518+#ifdef CONFIG_PAX_KERNEXEC
13519+ GET_CR0_INTO_RDI
13520+ btr $16,%rdi
13521+ SET_RDI_INTO_CR0
13522+#endif
13523+
13524+ GET_CR3_INTO_RDI
13525+ add $__START_KERNEL_map,%rdi
13526+ sub phys_base(%rip),%rdi
13527+
13528+#ifdef CONFIG_PARAVIRT
13529+ cmpl $0, pv_info+PARAVIRT_enabled
13530+ jz 1f
13531+ mov %rdi,%rbx
13532+ i = 0
13533+ .rept USER_PGD_PTRS
13534+ mov i*8(%rbx),%rsi
13535+ mov $0x67,%sil
13536+ lea i*8(%rbx),%rdi
13537+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13538+ i = i + 1
13539+ .endr
13540+ jmp 2f
13541+1:
13542+#endif
13543+
13544+ i = 0
13545+ .rept USER_PGD_PTRS
13546+ movb $0x67,i*8(%rdi)
13547+ i = i + 1
13548+ .endr
13549+
13550+#ifdef CONFIG_PARAVIRT
13551+2: PV_RESTORE_REGS(CLBR_RDI)
13552+ popq %rbx
13553+#endif
13554+
13555+ popq %rdi
13556+ retq
13557+ENDPROC(pax_exit_kernel_user)
13558+#endif
13559+
13560+.macro pax_erase_kstack
13561+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13562+ call pax_erase_kstack
13563+#endif
13564+.endm
13565+
13566+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13567+/*
13568+ * r10: thread_info
13569+ * rcx, rdx: can be clobbered
13570+ */
13571+ENTRY(pax_erase_kstack)
13572+ pushq %rdi
13573+ pushq %rax
13574+
13575+ GET_THREAD_INFO(%r10)
13576+ mov TI_lowest_stack(%r10), %rdi
13577+ mov $-0xBEEF, %rax
13578+ std
13579+
13580+1: mov %edi, %ecx
13581+ and $THREAD_SIZE_asm - 1, %ecx
13582+ shr $3, %ecx
13583+ repne scasq
13584+ jecxz 2f
13585+
13586+ cmp $2*8, %ecx
13587+ jc 2f
13588+
13589+ mov $2*8, %ecx
13590+ repe scasq
13591+ jecxz 2f
13592+ jne 1b
13593+
13594+2: cld
13595+ mov %esp, %ecx
13596+ sub %edi, %ecx
13597+ shr $3, %ecx
13598+ rep stosq
13599+
13600+ mov TI_task_thread_sp0(%r10), %rdi
13601+ sub $256, %rdi
13602+ mov %rdi, TI_lowest_stack(%r10)
13603+
13604+ popq %rax
13605+ popq %rdi
13606+ ret
13607+ENDPROC(pax_erase_kstack)
13608+#endif
13609
13610 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13611 #ifdef CONFIG_TRACE_IRQFLAGS
13612@@ -317,7 +569,7 @@ ENTRY(save_args)
13613 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13614 movq_cfi rbp, 8 /* push %rbp */
13615 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13616- testl $3, CS(%rdi)
13617+ testb $3, CS(%rdi)
13618 je 1f
13619 SWAPGS
13620 /*
13621@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13622
13623 RESTORE_REST
13624
13625- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13626+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13627 je int_ret_from_sys_call
13628
13629 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13630@@ -455,7 +707,7 @@ END(ret_from_fork)
13631 ENTRY(system_call)
13632 CFI_STARTPROC simple
13633 CFI_SIGNAL_FRAME
13634- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13635+ CFI_DEF_CFA rsp,0
13636 CFI_REGISTER rip,rcx
13637 /*CFI_REGISTER rflags,r11*/
13638 SWAPGS_UNSAFE_STACK
13639@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13640
13641 movq %rsp,PER_CPU_VAR(old_rsp)
13642 movq PER_CPU_VAR(kernel_stack),%rsp
13643+ pax_enter_kernel_user
13644 /*
13645 * No need to follow this irqs off/on section - it's straight
13646 * and short:
13647 */
13648 ENABLE_INTERRUPTS(CLBR_NONE)
13649- SAVE_ARGS 8,1
13650+ SAVE_ARGS 8*6,1
13651 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13652 movq %rcx,RIP-ARGOFFSET(%rsp)
13653 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13654@@ -502,6 +755,7 @@ sysret_check:
13655 andl %edi,%edx
13656 jnz sysret_careful
13657 CFI_REMEMBER_STATE
13658+ pax_exit_kernel_user
13659 /*
13660 * sysretq will re-enable interrupts:
13661 */
13662@@ -562,6 +816,9 @@ auditsys:
13663 movq %rax,%rsi /* 2nd arg: syscall number */
13664 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13665 call audit_syscall_entry
13666+
13667+ pax_erase_kstack
13668+
13669 LOAD_ARGS 0 /* reload call-clobbered registers */
13670 jmp system_call_fastpath
13671
13672@@ -592,6 +849,9 @@ tracesys:
13673 FIXUP_TOP_OF_STACK %rdi
13674 movq %rsp,%rdi
13675 call syscall_trace_enter
13676+
13677+ pax_erase_kstack
13678+
13679 /*
13680 * Reload arg registers from stack in case ptrace changed them.
13681 * We don't reload %rax because syscall_trace_enter() returned
13682@@ -613,7 +873,7 @@ tracesys:
13683 GLOBAL(int_ret_from_sys_call)
13684 DISABLE_INTERRUPTS(CLBR_NONE)
13685 TRACE_IRQS_OFF
13686- testl $3,CS-ARGOFFSET(%rsp)
13687+ testb $3,CS-ARGOFFSET(%rsp)
13688 je retint_restore_args
13689 movl $_TIF_ALLWORK_MASK,%edi
13690 /* edi: mask to check */
13691@@ -800,6 +1060,16 @@ END(interrupt)
13692 CFI_ADJUST_CFA_OFFSET 10*8
13693 call save_args
13694 PARTIAL_FRAME 0
13695+#ifdef CONFIG_PAX_MEMORY_UDEREF
13696+ testb $3, CS(%rdi)
13697+ jnz 1f
13698+ pax_enter_kernel
13699+ jmp 2f
13700+1: pax_enter_kernel_user
13701+2:
13702+#else
13703+ pax_enter_kernel
13704+#endif
13705 call \func
13706 .endm
13707
13708@@ -822,7 +1092,7 @@ ret_from_intr:
13709 CFI_ADJUST_CFA_OFFSET -8
13710 exit_intr:
13711 GET_THREAD_INFO(%rcx)
13712- testl $3,CS-ARGOFFSET(%rsp)
13713+ testb $3,CS-ARGOFFSET(%rsp)
13714 je retint_kernel
13715
13716 /* Interrupt came from user space */
13717@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13718 * The iretq could re-enable interrupts:
13719 */
13720 DISABLE_INTERRUPTS(CLBR_ANY)
13721+ pax_exit_kernel_user
13722 TRACE_IRQS_IRETQ
13723 SWAPGS
13724 jmp restore_args
13725
13726 retint_restore_args: /* return to kernel space */
13727 DISABLE_INTERRUPTS(CLBR_ANY)
13728+ pax_exit_kernel
13729 /*
13730 * The iretq could re-enable interrupts:
13731 */
13732@@ -1032,6 +1304,16 @@ ENTRY(\sym)
13733 CFI_ADJUST_CFA_OFFSET 15*8
13734 call error_entry
13735 DEFAULT_FRAME 0
13736+#ifdef CONFIG_PAX_MEMORY_UDEREF
13737+ testb $3, CS(%rsp)
13738+ jnz 1f
13739+ pax_enter_kernel
13740+ jmp 2f
13741+1: pax_enter_kernel_user
13742+2:
13743+#else
13744+ pax_enter_kernel
13745+#endif
13746 movq %rsp,%rdi /* pt_regs pointer */
13747 xorl %esi,%esi /* no error code */
13748 call \do_sym
13749@@ -1049,6 +1331,16 @@ ENTRY(\sym)
13750 subq $15*8, %rsp
13751 call save_paranoid
13752 TRACE_IRQS_OFF
13753+#ifdef CONFIG_PAX_MEMORY_UDEREF
13754+ testb $3, CS(%rsp)
13755+ jnz 1f
13756+ pax_enter_kernel
13757+ jmp 2f
13758+1: pax_enter_kernel_user
13759+2:
13760+#else
13761+ pax_enter_kernel
13762+#endif
13763 movq %rsp,%rdi /* pt_regs pointer */
13764 xorl %esi,%esi /* no error code */
13765 call \do_sym
13766@@ -1066,9 +1358,24 @@ ENTRY(\sym)
13767 subq $15*8, %rsp
13768 call save_paranoid
13769 TRACE_IRQS_OFF
13770+#ifdef CONFIG_PAX_MEMORY_UDEREF
13771+ testb $3, CS(%rsp)
13772+ jnz 1f
13773+ pax_enter_kernel
13774+ jmp 2f
13775+1: pax_enter_kernel_user
13776+2:
13777+#else
13778+ pax_enter_kernel
13779+#endif
13780 movq %rsp,%rdi /* pt_regs pointer */
13781 xorl %esi,%esi /* no error code */
13782- PER_CPU(init_tss, %rbp)
13783+#ifdef CONFIG_SMP
13784+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13785+ lea init_tss(%rbp), %rbp
13786+#else
13787+ lea init_tss(%rip), %rbp
13788+#endif
13789 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13790 call \do_sym
13791 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13792@@ -1085,6 +1392,16 @@ ENTRY(\sym)
13793 CFI_ADJUST_CFA_OFFSET 15*8
13794 call error_entry
13795 DEFAULT_FRAME 0
13796+#ifdef CONFIG_PAX_MEMORY_UDEREF
13797+ testb $3, CS(%rsp)
13798+ jnz 1f
13799+ pax_enter_kernel
13800+ jmp 2f
13801+1: pax_enter_kernel_user
13802+2:
13803+#else
13804+ pax_enter_kernel
13805+#endif
13806 movq %rsp,%rdi /* pt_regs pointer */
13807 movq ORIG_RAX(%rsp),%rsi /* get error code */
13808 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13809@@ -1104,6 +1421,16 @@ ENTRY(\sym)
13810 call save_paranoid
13811 DEFAULT_FRAME 0
13812 TRACE_IRQS_OFF
13813+#ifdef CONFIG_PAX_MEMORY_UDEREF
13814+ testb $3, CS(%rsp)
13815+ jnz 1f
13816+ pax_enter_kernel
13817+ jmp 2f
13818+1: pax_enter_kernel_user
13819+2:
13820+#else
13821+ pax_enter_kernel
13822+#endif
13823 movq %rsp,%rdi /* pt_regs pointer */
13824 movq ORIG_RAX(%rsp),%rsi /* get error code */
13825 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13826@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13827 TRACE_IRQS_OFF
13828 testl %ebx,%ebx /* swapgs needed? */
13829 jnz paranoid_restore
13830- testl $3,CS(%rsp)
13831+ testb $3,CS(%rsp)
13832 jnz paranoid_userspace
13833+#ifdef CONFIG_PAX_MEMORY_UDEREF
13834+ pax_exit_kernel
13835+ TRACE_IRQS_IRETQ 0
13836+ SWAPGS_UNSAFE_STACK
13837+ RESTORE_ALL 8
13838+ jmp irq_return
13839+#endif
13840 paranoid_swapgs:
13841+#ifdef CONFIG_PAX_MEMORY_UDEREF
13842+ pax_exit_kernel_user
13843+#else
13844+ pax_exit_kernel
13845+#endif
13846 TRACE_IRQS_IRETQ 0
13847 SWAPGS_UNSAFE_STACK
13848 RESTORE_ALL 8
13849 jmp irq_return
13850 paranoid_restore:
13851+ pax_exit_kernel
13852 TRACE_IRQS_IRETQ 0
13853 RESTORE_ALL 8
13854 jmp irq_return
13855@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13856 movq_cfi r14, R14+8
13857 movq_cfi r15, R15+8
13858 xorl %ebx,%ebx
13859- testl $3,CS+8(%rsp)
13860+ testb $3,CS+8(%rsp)
13861 je error_kernelspace
13862 error_swapgs:
13863 SWAPGS
13864@@ -1529,6 +1869,16 @@ ENTRY(nmi)
13865 CFI_ADJUST_CFA_OFFSET 15*8
13866 call save_paranoid
13867 DEFAULT_FRAME 0
13868+#ifdef CONFIG_PAX_MEMORY_UDEREF
13869+ testb $3, CS(%rsp)
13870+ jnz 1f
13871+ pax_enter_kernel
13872+ jmp 2f
13873+1: pax_enter_kernel_user
13874+2:
13875+#else
13876+ pax_enter_kernel
13877+#endif
13878 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13879 movq %rsp,%rdi
13880 movq $-1,%rsi
13881@@ -1539,11 +1889,25 @@ ENTRY(nmi)
13882 DISABLE_INTERRUPTS(CLBR_NONE)
13883 testl %ebx,%ebx /* swapgs needed? */
13884 jnz nmi_restore
13885- testl $3,CS(%rsp)
13886+ testb $3,CS(%rsp)
13887 jnz nmi_userspace
13888+#ifdef CONFIG_PAX_MEMORY_UDEREF
13889+ pax_exit_kernel
13890+ SWAPGS_UNSAFE_STACK
13891+ RESTORE_ALL 8
13892+ jmp irq_return
13893+#endif
13894 nmi_swapgs:
13895+#ifdef CONFIG_PAX_MEMORY_UDEREF
13896+ pax_exit_kernel_user
13897+#else
13898+ pax_exit_kernel
13899+#endif
13900 SWAPGS_UNSAFE_STACK
13901+ RESTORE_ALL 8
13902+ jmp irq_return
13903 nmi_restore:
13904+ pax_exit_kernel
13905 RESTORE_ALL 8
13906 jmp irq_return
13907 nmi_userspace:
13908diff -urNp linux-2.6.32.42/arch/x86/kernel/ftrace.c linux-2.6.32.42/arch/x86/kernel/ftrace.c
13909--- linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13910+++ linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13911@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13912 static void *mod_code_newcode; /* holds the text to write to the IP */
13913
13914 static unsigned nmi_wait_count;
13915-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13916+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13917
13918 int ftrace_arch_read_dyn_info(char *buf, int size)
13919 {
13920@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13921
13922 r = snprintf(buf, size, "%u %u",
13923 nmi_wait_count,
13924- atomic_read(&nmi_update_count));
13925+ atomic_read_unchecked(&nmi_update_count));
13926 return r;
13927 }
13928
13929@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13930 {
13931 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13932 smp_rmb();
13933+ pax_open_kernel();
13934 ftrace_mod_code();
13935- atomic_inc(&nmi_update_count);
13936+ pax_close_kernel();
13937+ atomic_inc_unchecked(&nmi_update_count);
13938 }
13939 /* Must have previous changes seen before executions */
13940 smp_mb();
13941@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13942
13943
13944
13945-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13946+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13947
13948 static unsigned char *ftrace_nop_replace(void)
13949 {
13950@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13951 {
13952 unsigned char replaced[MCOUNT_INSN_SIZE];
13953
13954+ ip = ktla_ktva(ip);
13955+
13956 /*
13957 * Note: Due to modules and __init, code can
13958 * disappear and change, we need to protect against faulting
13959@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13960 unsigned char old[MCOUNT_INSN_SIZE], *new;
13961 int ret;
13962
13963- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13964+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13965 new = ftrace_call_replace(ip, (unsigned long)func);
13966 ret = ftrace_modify_code(ip, old, new);
13967
13968@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13969 switch (faulted) {
13970 case 0:
13971 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13972- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13973+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13974 break;
13975 case 1:
13976 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13977- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13978+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13979 break;
13980 case 2:
13981 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13982- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13983+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13984 break;
13985 }
13986
13987@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13988 {
13989 unsigned char code[MCOUNT_INSN_SIZE];
13990
13991+ ip = ktla_ktva(ip);
13992+
13993 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13994 return -EFAULT;
13995
13996diff -urNp linux-2.6.32.42/arch/x86/kernel/head32.c linux-2.6.32.42/arch/x86/kernel/head32.c
13997--- linux-2.6.32.42/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13998+++ linux-2.6.32.42/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13999@@ -16,6 +16,7 @@
14000 #include <asm/apic.h>
14001 #include <asm/io_apic.h>
14002 #include <asm/bios_ebda.h>
14003+#include <asm/boot.h>
14004
14005 static void __init i386_default_early_setup(void)
14006 {
14007@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14008 {
14009 reserve_trampoline_memory();
14010
14011- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14012+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14013
14014 #ifdef CONFIG_BLK_DEV_INITRD
14015 /* Reserve INITRD */
14016diff -urNp linux-2.6.32.42/arch/x86/kernel/head_32.S linux-2.6.32.42/arch/x86/kernel/head_32.S
14017--- linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14018+++ linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14019@@ -19,10 +19,17 @@
14020 #include <asm/setup.h>
14021 #include <asm/processor-flags.h>
14022 #include <asm/percpu.h>
14023+#include <asm/msr-index.h>
14024
14025 /* Physical address */
14026 #define pa(X) ((X) - __PAGE_OFFSET)
14027
14028+#ifdef CONFIG_PAX_KERNEXEC
14029+#define ta(X) (X)
14030+#else
14031+#define ta(X) ((X) - __PAGE_OFFSET)
14032+#endif
14033+
14034 /*
14035 * References to members of the new_cpu_data structure.
14036 */
14037@@ -52,11 +59,7 @@
14038 * and small than max_low_pfn, otherwise will waste some page table entries
14039 */
14040
14041-#if PTRS_PER_PMD > 1
14042-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14043-#else
14044-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14045-#endif
14046+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14047
14048 /* Enough space to fit pagetables for the low memory linear map */
14049 MAPPING_BEYOND_END = \
14050@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14051 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14052
14053 /*
14054+ * Real beginning of normal "text" segment
14055+ */
14056+ENTRY(stext)
14057+ENTRY(_stext)
14058+
14059+/*
14060 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14061 * %esi points to the real-mode code as a 32-bit pointer.
14062 * CS and DS must be 4 GB flat segments, but we don't depend on
14063@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14064 * can.
14065 */
14066 __HEAD
14067+
14068+#ifdef CONFIG_PAX_KERNEXEC
14069+ jmp startup_32
14070+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14071+.fill PAGE_SIZE-5,1,0xcc
14072+#endif
14073+
14074 ENTRY(startup_32)
14075+ movl pa(stack_start),%ecx
14076+
14077 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14078 us to not reload segments */
14079 testb $(1<<6), BP_loadflags(%esi)
14080@@ -95,7 +113,60 @@ ENTRY(startup_32)
14081 movl %eax,%es
14082 movl %eax,%fs
14083 movl %eax,%gs
14084+ movl %eax,%ss
14085 2:
14086+ leal -__PAGE_OFFSET(%ecx),%esp
14087+
14088+#ifdef CONFIG_SMP
14089+ movl $pa(cpu_gdt_table),%edi
14090+ movl $__per_cpu_load,%eax
14091+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14092+ rorl $16,%eax
14093+ movb %al,__KERNEL_PERCPU + 4(%edi)
14094+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14095+ movl $__per_cpu_end - 1,%eax
14096+ subl $__per_cpu_start,%eax
14097+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14098+#endif
14099+
14100+#ifdef CONFIG_PAX_MEMORY_UDEREF
14101+ movl $NR_CPUS,%ecx
14102+ movl $pa(cpu_gdt_table),%edi
14103+1:
14104+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14105+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14106+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14107+ addl $PAGE_SIZE_asm,%edi
14108+ loop 1b
14109+#endif
14110+
14111+#ifdef CONFIG_PAX_KERNEXEC
14112+ movl $pa(boot_gdt),%edi
14113+ movl $__LOAD_PHYSICAL_ADDR,%eax
14114+ movw %ax,__BOOT_CS + 2(%edi)
14115+ rorl $16,%eax
14116+ movb %al,__BOOT_CS + 4(%edi)
14117+ movb %ah,__BOOT_CS + 7(%edi)
14118+ rorl $16,%eax
14119+
14120+ ljmp $(__BOOT_CS),$1f
14121+1:
14122+
14123+ movl $NR_CPUS,%ecx
14124+ movl $pa(cpu_gdt_table),%edi
14125+ addl $__PAGE_OFFSET,%eax
14126+1:
14127+ movw %ax,__KERNEL_CS + 2(%edi)
14128+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14129+ rorl $16,%eax
14130+ movb %al,__KERNEL_CS + 4(%edi)
14131+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14132+ movb %ah,__KERNEL_CS + 7(%edi)
14133+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14134+ rorl $16,%eax
14135+ addl $PAGE_SIZE_asm,%edi
14136+ loop 1b
14137+#endif
14138
14139 /*
14140 * Clear BSS first so that there are no surprises...
14141@@ -140,9 +211,7 @@ ENTRY(startup_32)
14142 cmpl $num_subarch_entries, %eax
14143 jae bad_subarch
14144
14145- movl pa(subarch_entries)(,%eax,4), %eax
14146- subl $__PAGE_OFFSET, %eax
14147- jmp *%eax
14148+ jmp *pa(subarch_entries)(,%eax,4)
14149
14150 bad_subarch:
14151 WEAK(lguest_entry)
14152@@ -154,10 +223,10 @@ WEAK(xen_entry)
14153 __INITDATA
14154
14155 subarch_entries:
14156- .long default_entry /* normal x86/PC */
14157- .long lguest_entry /* lguest hypervisor */
14158- .long xen_entry /* Xen hypervisor */
14159- .long default_entry /* Moorestown MID */
14160+ .long ta(default_entry) /* normal x86/PC */
14161+ .long ta(lguest_entry) /* lguest hypervisor */
14162+ .long ta(xen_entry) /* Xen hypervisor */
14163+ .long ta(default_entry) /* Moorestown MID */
14164 num_subarch_entries = (. - subarch_entries) / 4
14165 .previous
14166 #endif /* CONFIG_PARAVIRT */
14167@@ -218,8 +287,11 @@ default_entry:
14168 movl %eax, pa(max_pfn_mapped)
14169
14170 /* Do early initialization of the fixmap area */
14171- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14172- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14173+#ifdef CONFIG_COMPAT_VDSO
14174+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14175+#else
14176+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14177+#endif
14178 #else /* Not PAE */
14179
14180 page_pde_offset = (__PAGE_OFFSET >> 20);
14181@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14182 movl %eax, pa(max_pfn_mapped)
14183
14184 /* Do early initialization of the fixmap area */
14185- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14186- movl %eax,pa(swapper_pg_dir+0xffc)
14187+#ifdef CONFIG_COMPAT_VDSO
14188+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14189+#else
14190+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14191+#endif
14192 #endif
14193 jmp 3f
14194 /*
14195@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14196 movl %eax,%es
14197 movl %eax,%fs
14198 movl %eax,%gs
14199+ movl pa(stack_start),%ecx
14200+ movl %eax,%ss
14201+ leal -__PAGE_OFFSET(%ecx),%esp
14202 #endif /* CONFIG_SMP */
14203 3:
14204
14205@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14206 orl %edx,%eax
14207 movl %eax,%cr4
14208
14209+#ifdef CONFIG_X86_PAE
14210 btl $5, %eax # check if PAE is enabled
14211 jnc 6f
14212
14213@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14214 cpuid
14215 cmpl $0x80000000, %eax
14216 jbe 6f
14217+
14218+ /* Clear bogus XD_DISABLE bits */
14219+ call verify_cpu
14220+
14221 mov $0x80000001, %eax
14222 cpuid
14223 /* Execute Disable bit supported? */
14224@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14225 jnc 6f
14226
14227 /* Setup EFER (Extended Feature Enable Register) */
14228- movl $0xc0000080, %ecx
14229+ movl $MSR_EFER, %ecx
14230 rdmsr
14231
14232 btsl $11, %eax
14233 /* Make changes effective */
14234 wrmsr
14235
14236+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14237+ movl $1,pa(nx_enabled)
14238+#endif
14239+
14240 6:
14241
14242 /*
14243@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14244 movl %eax,%cr0 /* ..and set paging (PG) bit */
14245 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14246 1:
14247- /* Set up the stack pointer */
14248- lss stack_start,%esp
14249+ /* Shift the stack pointer to a virtual address */
14250+ addl $__PAGE_OFFSET, %esp
14251
14252 /*
14253 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14254@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14255
14256 #ifdef CONFIG_SMP
14257 cmpb $0, ready
14258- jz 1f /* Initial CPU cleans BSS */
14259- jmp checkCPUtype
14260-1:
14261+ jnz checkCPUtype
14262 #endif /* CONFIG_SMP */
14263
14264 /*
14265@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14266 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14267 movl %eax,%ss # after changing gdt.
14268
14269- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14270+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14271 movl %eax,%ds
14272 movl %eax,%es
14273
14274@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14275 */
14276 cmpb $0,ready
14277 jne 1f
14278- movl $per_cpu__gdt_page,%eax
14279+ movl $cpu_gdt_table,%eax
14280 movl $per_cpu__stack_canary,%ecx
14281+#ifdef CONFIG_SMP
14282+ addl $__per_cpu_load,%ecx
14283+#endif
14284 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14285 shrl $16, %ecx
14286 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14287 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14288 1:
14289-#endif
14290 movl $(__KERNEL_STACK_CANARY),%eax
14291+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14292+ movl $(__USER_DS),%eax
14293+#else
14294+ xorl %eax,%eax
14295+#endif
14296 movl %eax,%gs
14297
14298 xorl %eax,%eax # Clear LDT
14299@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14300
14301 cld # gcc2 wants the direction flag cleared at all times
14302 pushl $0 # fake return address for unwinder
14303-#ifdef CONFIG_SMP
14304- movb ready, %cl
14305 movb $1, ready
14306- cmpb $0,%cl # the first CPU calls start_kernel
14307- je 1f
14308- movl (stack_start), %esp
14309-1:
14310-#endif /* CONFIG_SMP */
14311 jmp *(initial_code)
14312
14313 /*
14314@@ -546,22 +631,22 @@ early_page_fault:
14315 jmp early_fault
14316
14317 early_fault:
14318- cld
14319 #ifdef CONFIG_PRINTK
14320+ cmpl $1,%ss:early_recursion_flag
14321+ je hlt_loop
14322+ incl %ss:early_recursion_flag
14323+ cld
14324 pusha
14325 movl $(__KERNEL_DS),%eax
14326 movl %eax,%ds
14327 movl %eax,%es
14328- cmpl $2,early_recursion_flag
14329- je hlt_loop
14330- incl early_recursion_flag
14331 movl %cr2,%eax
14332 pushl %eax
14333 pushl %edx /* trapno */
14334 pushl $fault_msg
14335 call printk
14336+; call dump_stack
14337 #endif
14338- call dump_stack
14339 hlt_loop:
14340 hlt
14341 jmp hlt_loop
14342@@ -569,8 +654,11 @@ hlt_loop:
14343 /* This is the default interrupt "handler" :-) */
14344 ALIGN
14345 ignore_int:
14346- cld
14347 #ifdef CONFIG_PRINTK
14348+ cmpl $2,%ss:early_recursion_flag
14349+ je hlt_loop
14350+ incl %ss:early_recursion_flag
14351+ cld
14352 pushl %eax
14353 pushl %ecx
14354 pushl %edx
14355@@ -579,9 +667,6 @@ ignore_int:
14356 movl $(__KERNEL_DS),%eax
14357 movl %eax,%ds
14358 movl %eax,%es
14359- cmpl $2,early_recursion_flag
14360- je hlt_loop
14361- incl early_recursion_flag
14362 pushl 16(%esp)
14363 pushl 24(%esp)
14364 pushl 32(%esp)
14365@@ -600,6 +685,8 @@ ignore_int:
14366 #endif
14367 iret
14368
14369+#include "verify_cpu.S"
14370+
14371 __REFDATA
14372 .align 4
14373 ENTRY(initial_code)
14374@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14375 /*
14376 * BSS section
14377 */
14378-__PAGE_ALIGNED_BSS
14379- .align PAGE_SIZE_asm
14380 #ifdef CONFIG_X86_PAE
14381+.section .swapper_pg_pmd,"a",@progbits
14382 swapper_pg_pmd:
14383 .fill 1024*KPMDS,4,0
14384 #else
14385+.section .swapper_pg_dir,"a",@progbits
14386 ENTRY(swapper_pg_dir)
14387 .fill 1024,4,0
14388 #endif
14389+.section .swapper_pg_fixmap,"a",@progbits
14390 swapper_pg_fixmap:
14391 .fill 1024,4,0
14392 #ifdef CONFIG_X86_TRAMPOLINE
14393+.section .trampoline_pg_dir,"a",@progbits
14394 ENTRY(trampoline_pg_dir)
14395+#ifdef CONFIG_X86_PAE
14396+ .fill 4,8,0
14397+#else
14398 .fill 1024,4,0
14399 #endif
14400+#endif
14401+
14402+.section .empty_zero_page,"a",@progbits
14403 ENTRY(empty_zero_page)
14404 .fill 4096,1,0
14405
14406 /*
14407+ * The IDT has to be page-aligned to simplify the Pentium
14408+ * F0 0F bug workaround.. We have a special link segment
14409+ * for this.
14410+ */
14411+.section .idt,"a",@progbits
14412+ENTRY(idt_table)
14413+ .fill 256,8,0
14414+
14415+/*
14416 * This starts the data section.
14417 */
14418 #ifdef CONFIG_X86_PAE
14419-__PAGE_ALIGNED_DATA
14420- /* Page-aligned for the benefit of paravirt? */
14421- .align PAGE_SIZE_asm
14422+.section .swapper_pg_dir,"a",@progbits
14423+
14424 ENTRY(swapper_pg_dir)
14425 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14426 # if KPMDS == 3
14427@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14428 # error "Kernel PMDs should be 1, 2 or 3"
14429 # endif
14430 .align PAGE_SIZE_asm /* needs to be page-sized too */
14431+
14432+#ifdef CONFIG_PAX_PER_CPU_PGD
14433+ENTRY(cpu_pgd)
14434+ .rept NR_CPUS
14435+ .fill 4,8,0
14436+ .endr
14437+#endif
14438+
14439 #endif
14440
14441 .data
14442+.balign 4
14443 ENTRY(stack_start)
14444- .long init_thread_union+THREAD_SIZE
14445- .long __BOOT_DS
14446+ .long init_thread_union+THREAD_SIZE-8
14447
14448 ready: .byte 0
14449
14450+.section .rodata,"a",@progbits
14451 early_recursion_flag:
14452 .long 0
14453
14454@@ -697,7 +809,7 @@ fault_msg:
14455 .word 0 # 32 bit align gdt_desc.address
14456 boot_gdt_descr:
14457 .word __BOOT_DS+7
14458- .long boot_gdt - __PAGE_OFFSET
14459+ .long pa(boot_gdt)
14460
14461 .word 0 # 32-bit align idt_desc.address
14462 idt_descr:
14463@@ -708,7 +820,7 @@ idt_descr:
14464 .word 0 # 32 bit align gdt_desc.address
14465 ENTRY(early_gdt_descr)
14466 .word GDT_ENTRIES*8-1
14467- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14468+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14469
14470 /*
14471 * The boot_gdt must mirror the equivalent in setup.S and is
14472@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14473 .align L1_CACHE_BYTES
14474 ENTRY(boot_gdt)
14475 .fill GDT_ENTRY_BOOT_CS,8,0
14476- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14477- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14478+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14479+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14480+
14481+ .align PAGE_SIZE_asm
14482+ENTRY(cpu_gdt_table)
14483+ .rept NR_CPUS
14484+ .quad 0x0000000000000000 /* NULL descriptor */
14485+ .quad 0x0000000000000000 /* 0x0b reserved */
14486+ .quad 0x0000000000000000 /* 0x13 reserved */
14487+ .quad 0x0000000000000000 /* 0x1b reserved */
14488+
14489+#ifdef CONFIG_PAX_KERNEXEC
14490+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14491+#else
14492+ .quad 0x0000000000000000 /* 0x20 unused */
14493+#endif
14494+
14495+ .quad 0x0000000000000000 /* 0x28 unused */
14496+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14497+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14498+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14499+ .quad 0x0000000000000000 /* 0x4b reserved */
14500+ .quad 0x0000000000000000 /* 0x53 reserved */
14501+ .quad 0x0000000000000000 /* 0x5b reserved */
14502+
14503+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14504+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14505+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14506+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14507+
14508+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14509+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14510+
14511+ /*
14512+ * Segments used for calling PnP BIOS have byte granularity.
14513+ * The code segments and data segments have fixed 64k limits,
14514+ * the transfer segment sizes are set at run time.
14515+ */
14516+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14517+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14518+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14519+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14520+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14521+
14522+ /*
14523+ * The APM segments have byte granularity and their bases
14524+ * are set at run time. All have 64k limits.
14525+ */
14526+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14527+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14528+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14529+
14530+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14531+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14532+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14533+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14534+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14535+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14536+
14537+ /* Be sure this is zeroed to avoid false validations in Xen */
14538+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14539+ .endr
14540diff -urNp linux-2.6.32.42/arch/x86/kernel/head_64.S linux-2.6.32.42/arch/x86/kernel/head_64.S
14541--- linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14542+++ linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14543@@ -19,6 +19,7 @@
14544 #include <asm/cache.h>
14545 #include <asm/processor-flags.h>
14546 #include <asm/percpu.h>
14547+#include <asm/cpufeature.h>
14548
14549 #ifdef CONFIG_PARAVIRT
14550 #include <asm/asm-offsets.h>
14551@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14552 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14553 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14554 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14555+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14556+L3_VMALLOC_START = pud_index(VMALLOC_START)
14557+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14558+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14559
14560 .text
14561 __HEAD
14562@@ -85,35 +90,22 @@ startup_64:
14563 */
14564 addq %rbp, init_level4_pgt + 0(%rip)
14565 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14566+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14567+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14568 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14569
14570 addq %rbp, level3_ident_pgt + 0(%rip)
14571+#ifndef CONFIG_XEN
14572+ addq %rbp, level3_ident_pgt + 8(%rip)
14573+#endif
14574
14575- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14576- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14577+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14578
14579- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14580+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14581+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14582
14583- /* Add an Identity mapping if I am above 1G */
14584- leaq _text(%rip), %rdi
14585- andq $PMD_PAGE_MASK, %rdi
14586-
14587- movq %rdi, %rax
14588- shrq $PUD_SHIFT, %rax
14589- andq $(PTRS_PER_PUD - 1), %rax
14590- jz ident_complete
14591-
14592- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14593- leaq level3_ident_pgt(%rip), %rbx
14594- movq %rdx, 0(%rbx, %rax, 8)
14595-
14596- movq %rdi, %rax
14597- shrq $PMD_SHIFT, %rax
14598- andq $(PTRS_PER_PMD - 1), %rax
14599- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14600- leaq level2_spare_pgt(%rip), %rbx
14601- movq %rdx, 0(%rbx, %rax, 8)
14602-ident_complete:
14603+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14604+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14605
14606 /*
14607 * Fixup the kernel text+data virtual addresses. Note that
14608@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14609 * after the boot processor executes this code.
14610 */
14611
14612- /* Enable PAE mode and PGE */
14613- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14614+ /* Enable PAE mode and PSE/PGE */
14615+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14616 movq %rax, %cr4
14617
14618 /* Setup early boot stage 4 level pagetables. */
14619@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14620 movl $MSR_EFER, %ecx
14621 rdmsr
14622 btsl $_EFER_SCE, %eax /* Enable System Call */
14623- btl $20,%edi /* No Execute supported? */
14624+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14625 jnc 1f
14626 btsl $_EFER_NX, %eax
14627+ leaq init_level4_pgt(%rip), %rdi
14628+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14629+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14630+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14631 1: wrmsr /* Make changes effective */
14632
14633 /* Setup cr0 */
14634@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14635 .quad x86_64_start_kernel
14636 ENTRY(initial_gs)
14637 .quad INIT_PER_CPU_VAR(irq_stack_union)
14638- __FINITDATA
14639
14640 ENTRY(stack_start)
14641 .quad init_thread_union+THREAD_SIZE-8
14642 .word 0
14643+ __FINITDATA
14644
14645 bad_address:
14646 jmp bad_address
14647
14648- .section ".init.text","ax"
14649+ __INIT
14650 #ifdef CONFIG_EARLY_PRINTK
14651 .globl early_idt_handlers
14652 early_idt_handlers:
14653@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14654 #endif /* EARLY_PRINTK */
14655 1: hlt
14656 jmp 1b
14657+ .previous
14658
14659 #ifdef CONFIG_EARLY_PRINTK
14660+ __INITDATA
14661 early_recursion_flag:
14662 .long 0
14663+ .previous
14664
14665+ .section .rodata,"a",@progbits
14666 early_idt_msg:
14667 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14668 early_idt_ripmsg:
14669 .asciz "RIP %s\n"
14670-#endif /* CONFIG_EARLY_PRINTK */
14671 .previous
14672+#endif /* CONFIG_EARLY_PRINTK */
14673
14674+ .section .rodata,"a",@progbits
14675 #define NEXT_PAGE(name) \
14676 .balign PAGE_SIZE; \
14677 ENTRY(name)
14678@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14679 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14680 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14681 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14682+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14683+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14684+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14685+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14686 .org init_level4_pgt + L4_START_KERNEL*8, 0
14687 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14688 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14689
14690+#ifdef CONFIG_PAX_PER_CPU_PGD
14691+NEXT_PAGE(cpu_pgd)
14692+ .rept NR_CPUS
14693+ .fill 512,8,0
14694+ .endr
14695+#endif
14696+
14697 NEXT_PAGE(level3_ident_pgt)
14698 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14699+#ifdef CONFIG_XEN
14700 .fill 511,8,0
14701+#else
14702+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14703+ .fill 510,8,0
14704+#endif
14705+
14706+NEXT_PAGE(level3_vmalloc_pgt)
14707+ .fill 512,8,0
14708+
14709+NEXT_PAGE(level3_vmemmap_pgt)
14710+ .fill L3_VMEMMAP_START,8,0
14711+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14712
14713 NEXT_PAGE(level3_kernel_pgt)
14714 .fill L3_START_KERNEL,8,0
14715@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14716 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14717 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14718
14719+NEXT_PAGE(level2_vmemmap_pgt)
14720+ .fill 512,8,0
14721+
14722 NEXT_PAGE(level2_fixmap_pgt)
14723- .fill 506,8,0
14724- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14725- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14726- .fill 5,8,0
14727+ .fill 507,8,0
14728+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14729+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14730+ .fill 4,8,0
14731
14732-NEXT_PAGE(level1_fixmap_pgt)
14733+NEXT_PAGE(level1_vsyscall_pgt)
14734 .fill 512,8,0
14735
14736-NEXT_PAGE(level2_ident_pgt)
14737- /* Since I easily can, map the first 1G.
14738+ /* Since I easily can, map the first 2G.
14739 * Don't set NX because code runs from these pages.
14740 */
14741- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14742+NEXT_PAGE(level2_ident_pgt)
14743+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14744
14745 NEXT_PAGE(level2_kernel_pgt)
14746 /*
14747@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14748 * If you want to increase this then increase MODULES_VADDR
14749 * too.)
14750 */
14751- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14752- KERNEL_IMAGE_SIZE/PMD_SIZE)
14753-
14754-NEXT_PAGE(level2_spare_pgt)
14755- .fill 512, 8, 0
14756+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14757
14758 #undef PMDS
14759 #undef NEXT_PAGE
14760
14761- .data
14762+ .align PAGE_SIZE
14763+ENTRY(cpu_gdt_table)
14764+ .rept NR_CPUS
14765+ .quad 0x0000000000000000 /* NULL descriptor */
14766+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14767+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14768+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14769+ .quad 0x00cffb000000ffff /* __USER32_CS */
14770+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14771+ .quad 0x00affb000000ffff /* __USER_CS */
14772+
14773+#ifdef CONFIG_PAX_KERNEXEC
14774+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14775+#else
14776+ .quad 0x0 /* unused */
14777+#endif
14778+
14779+ .quad 0,0 /* TSS */
14780+ .quad 0,0 /* LDT */
14781+ .quad 0,0,0 /* three TLS descriptors */
14782+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14783+ /* asm/segment.h:GDT_ENTRIES must match this */
14784+
14785+ /* zero the remaining page */
14786+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14787+ .endr
14788+
14789 .align 16
14790 .globl early_gdt_descr
14791 early_gdt_descr:
14792 .word GDT_ENTRIES*8-1
14793 early_gdt_descr_base:
14794- .quad INIT_PER_CPU_VAR(gdt_page)
14795+ .quad cpu_gdt_table
14796
14797 ENTRY(phys_base)
14798 /* This must match the first entry in level2_kernel_pgt */
14799 .quad 0x0000000000000000
14800
14801 #include "../../x86/xen/xen-head.S"
14802-
14803- .section .bss, "aw", @nobits
14804+
14805+ .section .rodata,"a",@progbits
14806 .align L1_CACHE_BYTES
14807 ENTRY(idt_table)
14808- .skip IDT_ENTRIES * 16
14809+ .fill 512,8,0
14810
14811 __PAGE_ALIGNED_BSS
14812 .align PAGE_SIZE
14813diff -urNp linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c
14814--- linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14815+++ linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14816@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14817 EXPORT_SYMBOL(cmpxchg8b_emu);
14818 #endif
14819
14820+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14821+
14822 /* Networking helper routines. */
14823 EXPORT_SYMBOL(csum_partial_copy_generic);
14824+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14825+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14826
14827 EXPORT_SYMBOL(__get_user_1);
14828 EXPORT_SYMBOL(__get_user_2);
14829@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14830
14831 EXPORT_SYMBOL(csum_partial);
14832 EXPORT_SYMBOL(empty_zero_page);
14833+
14834+#ifdef CONFIG_PAX_KERNEXEC
14835+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14836+#endif
14837diff -urNp linux-2.6.32.42/arch/x86/kernel/i8259.c linux-2.6.32.42/arch/x86/kernel/i8259.c
14838--- linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14839+++ linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14840@@ -208,7 +208,7 @@ spurious_8259A_irq:
14841 "spurious 8259A interrupt: IRQ%d.\n", irq);
14842 spurious_irq_mask |= irqmask;
14843 }
14844- atomic_inc(&irq_err_count);
14845+ atomic_inc_unchecked(&irq_err_count);
14846 /*
14847 * Theoretically we do not have to handle this IRQ,
14848 * but in Linux this does not cause problems and is
14849diff -urNp linux-2.6.32.42/arch/x86/kernel/init_task.c linux-2.6.32.42/arch/x86/kernel/init_task.c
14850--- linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14851+++ linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14852@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14853 * way process stacks are handled. This is done by having a special
14854 * "init_task" linker map entry..
14855 */
14856-union thread_union init_thread_union __init_task_data =
14857- { INIT_THREAD_INFO(init_task) };
14858+union thread_union init_thread_union __init_task_data;
14859
14860 /*
14861 * Initial task structure.
14862@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14863 * section. Since TSS's are completely CPU-local, we want them
14864 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14865 */
14866-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14867-
14868+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14869+EXPORT_SYMBOL(init_tss);
14870diff -urNp linux-2.6.32.42/arch/x86/kernel/ioport.c linux-2.6.32.42/arch/x86/kernel/ioport.c
14871--- linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14872+++ linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14873@@ -6,6 +6,7 @@
14874 #include <linux/sched.h>
14875 #include <linux/kernel.h>
14876 #include <linux/capability.h>
14877+#include <linux/security.h>
14878 #include <linux/errno.h>
14879 #include <linux/types.h>
14880 #include <linux/ioport.h>
14881@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14882
14883 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14884 return -EINVAL;
14885+#ifdef CONFIG_GRKERNSEC_IO
14886+ if (turn_on && grsec_disable_privio) {
14887+ gr_handle_ioperm();
14888+ return -EPERM;
14889+ }
14890+#endif
14891 if (turn_on && !capable(CAP_SYS_RAWIO))
14892 return -EPERM;
14893
14894@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14895 * because the ->io_bitmap_max value must match the bitmap
14896 * contents:
14897 */
14898- tss = &per_cpu(init_tss, get_cpu());
14899+ tss = init_tss + get_cpu();
14900
14901 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14902
14903@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14904 return -EINVAL;
14905 /* Trying to gain more privileges? */
14906 if (level > old) {
14907+#ifdef CONFIG_GRKERNSEC_IO
14908+ if (grsec_disable_privio) {
14909+ gr_handle_iopl();
14910+ return -EPERM;
14911+ }
14912+#endif
14913 if (!capable(CAP_SYS_RAWIO))
14914 return -EPERM;
14915 }
14916diff -urNp linux-2.6.32.42/arch/x86/kernel/irq_32.c linux-2.6.32.42/arch/x86/kernel/irq_32.c
14917--- linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14918+++ linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
14919@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14920 __asm__ __volatile__("andl %%esp,%0" :
14921 "=r" (sp) : "0" (THREAD_SIZE - 1));
14922
14923- return sp < (sizeof(struct thread_info) + STACK_WARN);
14924+ return sp < STACK_WARN;
14925 }
14926
14927 static void print_stack_overflow(void)
14928@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14929 * per-CPU IRQ handling contexts (thread information and stack)
14930 */
14931 union irq_ctx {
14932- struct thread_info tinfo;
14933- u32 stack[THREAD_SIZE/sizeof(u32)];
14934-} __attribute__((aligned(PAGE_SIZE)));
14935+ unsigned long previous_esp;
14936+ u32 stack[THREAD_SIZE/sizeof(u32)];
14937+} __attribute__((aligned(THREAD_SIZE)));
14938
14939 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14940 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14941@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14942 static inline int
14943 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14944 {
14945- union irq_ctx *curctx, *irqctx;
14946+ union irq_ctx *irqctx;
14947 u32 *isp, arg1, arg2;
14948
14949- curctx = (union irq_ctx *) current_thread_info();
14950 irqctx = __get_cpu_var(hardirq_ctx);
14951
14952 /*
14953@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
14954 * handler) we can't do that and just have to keep using the
14955 * current stack (which is the irq stack already after all)
14956 */
14957- if (unlikely(curctx == irqctx))
14958+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14959 return 0;
14960
14961 /* build the stack frame on the IRQ stack */
14962- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14963- irqctx->tinfo.task = curctx->tinfo.task;
14964- irqctx->tinfo.previous_esp = current_stack_pointer;
14965+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14966+ irqctx->previous_esp = current_stack_pointer;
14967
14968- /*
14969- * Copy the softirq bits in preempt_count so that the
14970- * softirq checks work in the hardirq context.
14971- */
14972- irqctx->tinfo.preempt_count =
14973- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14974- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14975+#ifdef CONFIG_PAX_MEMORY_UDEREF
14976+ __set_fs(MAKE_MM_SEG(0));
14977+#endif
14978
14979 if (unlikely(overflow))
14980 call_on_stack(print_stack_overflow, isp);
14981@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
14982 : "0" (irq), "1" (desc), "2" (isp),
14983 "D" (desc->handle_irq)
14984 : "memory", "cc", "ecx");
14985+
14986+#ifdef CONFIG_PAX_MEMORY_UDEREF
14987+ __set_fs(current_thread_info()->addr_limit);
14988+#endif
14989+
14990 return 1;
14991 }
14992
14993@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
14994 */
14995 void __cpuinit irq_ctx_init(int cpu)
14996 {
14997- union irq_ctx *irqctx;
14998-
14999 if (per_cpu(hardirq_ctx, cpu))
15000 return;
15001
15002- irqctx = &per_cpu(hardirq_stack, cpu);
15003- irqctx->tinfo.task = NULL;
15004- irqctx->tinfo.exec_domain = NULL;
15005- irqctx->tinfo.cpu = cpu;
15006- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15007- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15008-
15009- per_cpu(hardirq_ctx, cpu) = irqctx;
15010-
15011- irqctx = &per_cpu(softirq_stack, cpu);
15012- irqctx->tinfo.task = NULL;
15013- irqctx->tinfo.exec_domain = NULL;
15014- irqctx->tinfo.cpu = cpu;
15015- irqctx->tinfo.preempt_count = 0;
15016- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15017-
15018- per_cpu(softirq_ctx, cpu) = irqctx;
15019+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15020+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15021
15022 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15023 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15024@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15025 asmlinkage void do_softirq(void)
15026 {
15027 unsigned long flags;
15028- struct thread_info *curctx;
15029 union irq_ctx *irqctx;
15030 u32 *isp;
15031
15032@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15033 local_irq_save(flags);
15034
15035 if (local_softirq_pending()) {
15036- curctx = current_thread_info();
15037 irqctx = __get_cpu_var(softirq_ctx);
15038- irqctx->tinfo.task = curctx->task;
15039- irqctx->tinfo.previous_esp = current_stack_pointer;
15040+ irqctx->previous_esp = current_stack_pointer;
15041
15042 /* build the stack frame on the softirq stack */
15043- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15044+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15045+
15046+#ifdef CONFIG_PAX_MEMORY_UDEREF
15047+ __set_fs(MAKE_MM_SEG(0));
15048+#endif
15049
15050 call_on_stack(__do_softirq, isp);
15051+
15052+#ifdef CONFIG_PAX_MEMORY_UDEREF
15053+ __set_fs(current_thread_info()->addr_limit);
15054+#endif
15055+
15056 /*
15057 * Shouldnt happen, we returned above if in_interrupt():
15058 */
15059diff -urNp linux-2.6.32.42/arch/x86/kernel/irq.c linux-2.6.32.42/arch/x86/kernel/irq.c
15060--- linux-2.6.32.42/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15061+++ linux-2.6.32.42/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15062@@ -15,7 +15,7 @@
15063 #include <asm/mce.h>
15064 #include <asm/hw_irq.h>
15065
15066-atomic_t irq_err_count;
15067+atomic_unchecked_t irq_err_count;
15068
15069 /* Function pointer for generic interrupt vector handling */
15070 void (*generic_interrupt_extension)(void) = NULL;
15071@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15072 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15073 seq_printf(p, " Machine check polls\n");
15074 #endif
15075- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15076+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15077 #if defined(CONFIG_X86_IO_APIC)
15078- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15079+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15080 #endif
15081 return 0;
15082 }
15083@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15084
15085 u64 arch_irq_stat(void)
15086 {
15087- u64 sum = atomic_read(&irq_err_count);
15088+ u64 sum = atomic_read_unchecked(&irq_err_count);
15089
15090 #ifdef CONFIG_X86_IO_APIC
15091- sum += atomic_read(&irq_mis_count);
15092+ sum += atomic_read_unchecked(&irq_mis_count);
15093 #endif
15094 return sum;
15095 }
15096diff -urNp linux-2.6.32.42/arch/x86/kernel/kgdb.c linux-2.6.32.42/arch/x86/kernel/kgdb.c
15097--- linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15098+++ linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15099@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15100
15101 /* clear the trace bit */
15102 linux_regs->flags &= ~X86_EFLAGS_TF;
15103- atomic_set(&kgdb_cpu_doing_single_step, -1);
15104+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15105
15106 /* set the trace bit if we're stepping */
15107 if (remcomInBuffer[0] == 's') {
15108 linux_regs->flags |= X86_EFLAGS_TF;
15109 kgdb_single_step = 1;
15110- atomic_set(&kgdb_cpu_doing_single_step,
15111+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15112 raw_smp_processor_id());
15113 }
15114
15115@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15116 break;
15117
15118 case DIE_DEBUG:
15119- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15120+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15121 raw_smp_processor_id()) {
15122 if (user_mode(regs))
15123 return single_step_cont(regs, args);
15124@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15125 return instruction_pointer(regs);
15126 }
15127
15128-struct kgdb_arch arch_kgdb_ops = {
15129+const struct kgdb_arch arch_kgdb_ops = {
15130 /* Breakpoint instruction: */
15131 .gdb_bpt_instr = { 0xcc },
15132 .flags = KGDB_HW_BREAKPOINT,
15133diff -urNp linux-2.6.32.42/arch/x86/kernel/kprobes.c linux-2.6.32.42/arch/x86/kernel/kprobes.c
15134--- linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15135+++ linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15136@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15137 char op;
15138 s32 raddr;
15139 } __attribute__((packed)) * jop;
15140- jop = (struct __arch_jmp_op *)from;
15141+
15142+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15143+
15144+ pax_open_kernel();
15145 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15146 jop->op = RELATIVEJUMP_INSTRUCTION;
15147+ pax_close_kernel();
15148 }
15149
15150 /*
15151@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15152 kprobe_opcode_t opcode;
15153 kprobe_opcode_t *orig_opcodes = opcodes;
15154
15155- if (search_exception_tables((unsigned long)opcodes))
15156+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15157 return 0; /* Page fault may occur on this address. */
15158
15159 retry:
15160@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15161 disp = (u8 *) p->addr + *((s32 *) insn) -
15162 (u8 *) p->ainsn.insn;
15163 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15164+ pax_open_kernel();
15165 *(s32 *)insn = (s32) disp;
15166+ pax_close_kernel();
15167 }
15168 }
15169 #endif
15170@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15171
15172 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15173 {
15174- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15175+ pax_open_kernel();
15176+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15177+ pax_close_kernel();
15178
15179 fix_riprel(p);
15180
15181- if (can_boost(p->addr))
15182+ if (can_boost(ktla_ktva(p->addr)))
15183 p->ainsn.boostable = 0;
15184 else
15185 p->ainsn.boostable = -1;
15186
15187- p->opcode = *p->addr;
15188+ p->opcode = *(ktla_ktva(p->addr));
15189 }
15190
15191 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15192@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15193 if (p->opcode == BREAKPOINT_INSTRUCTION)
15194 regs->ip = (unsigned long)p->addr;
15195 else
15196- regs->ip = (unsigned long)p->ainsn.insn;
15197+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15198 }
15199
15200 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15201@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15202 if (p->ainsn.boostable == 1 && !p->post_handler) {
15203 /* Boost up -- we can execute copied instructions directly */
15204 reset_current_kprobe();
15205- regs->ip = (unsigned long)p->ainsn.insn;
15206+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15207 preempt_enable_no_resched();
15208 return;
15209 }
15210@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15211 struct kprobe_ctlblk *kcb;
15212
15213 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15214- if (*addr != BREAKPOINT_INSTRUCTION) {
15215+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15216 /*
15217 * The breakpoint instruction was removed right
15218 * after we hit it. Another cpu has removed
15219@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15220 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15221 {
15222 unsigned long *tos = stack_addr(regs);
15223- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15224+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15225 unsigned long orig_ip = (unsigned long)p->addr;
15226 kprobe_opcode_t *insn = p->ainsn.insn;
15227
15228@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15229 struct die_args *args = data;
15230 int ret = NOTIFY_DONE;
15231
15232- if (args->regs && user_mode_vm(args->regs))
15233+ if (args->regs && user_mode(args->regs))
15234 return ret;
15235
15236 switch (val) {
15237diff -urNp linux-2.6.32.42/arch/x86/kernel/ldt.c linux-2.6.32.42/arch/x86/kernel/ldt.c
15238--- linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15239+++ linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15240@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15241 if (reload) {
15242 #ifdef CONFIG_SMP
15243 preempt_disable();
15244- load_LDT(pc);
15245+ load_LDT_nolock(pc);
15246 if (!cpumask_equal(mm_cpumask(current->mm),
15247 cpumask_of(smp_processor_id())))
15248 smp_call_function(flush_ldt, current->mm, 1);
15249 preempt_enable();
15250 #else
15251- load_LDT(pc);
15252+ load_LDT_nolock(pc);
15253 #endif
15254 }
15255 if (oldsize) {
15256@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15257 return err;
15258
15259 for (i = 0; i < old->size; i++)
15260- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15261+ write_ldt_entry(new->ldt, i, old->ldt + i);
15262 return 0;
15263 }
15264
15265@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15266 retval = copy_ldt(&mm->context, &old_mm->context);
15267 mutex_unlock(&old_mm->context.lock);
15268 }
15269+
15270+ if (tsk == current) {
15271+ mm->context.vdso = 0;
15272+
15273+#ifdef CONFIG_X86_32
15274+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15275+ mm->context.user_cs_base = 0UL;
15276+ mm->context.user_cs_limit = ~0UL;
15277+
15278+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15279+ cpus_clear(mm->context.cpu_user_cs_mask);
15280+#endif
15281+
15282+#endif
15283+#endif
15284+
15285+ }
15286+
15287 return retval;
15288 }
15289
15290@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15291 }
15292 }
15293
15294+#ifdef CONFIG_PAX_SEGMEXEC
15295+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15296+ error = -EINVAL;
15297+ goto out_unlock;
15298+ }
15299+#endif
15300+
15301 fill_ldt(&ldt, &ldt_info);
15302 if (oldmode)
15303 ldt.avl = 0;
15304diff -urNp linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c
15305--- linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15306+++ linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15307@@ -26,7 +26,7 @@
15308 #include <asm/system.h>
15309 #include <asm/cacheflush.h>
15310
15311-static void set_idt(void *newidt, __u16 limit)
15312+static void set_idt(struct desc_struct *newidt, __u16 limit)
15313 {
15314 struct desc_ptr curidt;
15315
15316@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15317 }
15318
15319
15320-static void set_gdt(void *newgdt, __u16 limit)
15321+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15322 {
15323 struct desc_ptr curgdt;
15324
15325@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15326 }
15327
15328 control_page = page_address(image->control_code_page);
15329- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15330+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15331
15332 relocate_kernel_ptr = control_page;
15333 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15334diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_amd.c linux-2.6.32.42/arch/x86/kernel/microcode_amd.c
15335--- linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15336+++ linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15337@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15338 uci->mc = NULL;
15339 }
15340
15341-static struct microcode_ops microcode_amd_ops = {
15342+static const struct microcode_ops microcode_amd_ops = {
15343 .request_microcode_user = request_microcode_user,
15344 .request_microcode_fw = request_microcode_fw,
15345 .collect_cpu_info = collect_cpu_info_amd,
15346@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15347 .microcode_fini_cpu = microcode_fini_cpu_amd,
15348 };
15349
15350-struct microcode_ops * __init init_amd_microcode(void)
15351+const struct microcode_ops * __init init_amd_microcode(void)
15352 {
15353 return &microcode_amd_ops;
15354 }
15355diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_core.c linux-2.6.32.42/arch/x86/kernel/microcode_core.c
15356--- linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15357+++ linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15358@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15359
15360 #define MICROCODE_VERSION "2.00"
15361
15362-static struct microcode_ops *microcode_ops;
15363+static const struct microcode_ops *microcode_ops;
15364
15365 /*
15366 * Synchronization.
15367diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_intel.c linux-2.6.32.42/arch/x86/kernel/microcode_intel.c
15368--- linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15369+++ linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15370@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15371
15372 static int get_ucode_user(void *to, const void *from, size_t n)
15373 {
15374- return copy_from_user(to, from, n);
15375+ return copy_from_user(to, (__force const void __user *)from, n);
15376 }
15377
15378 static enum ucode_state
15379 request_microcode_user(int cpu, const void __user *buf, size_t size)
15380 {
15381- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15382+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15383 }
15384
15385 static void microcode_fini_cpu(int cpu)
15386@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15387 uci->mc = NULL;
15388 }
15389
15390-static struct microcode_ops microcode_intel_ops = {
15391+static const struct microcode_ops microcode_intel_ops = {
15392 .request_microcode_user = request_microcode_user,
15393 .request_microcode_fw = request_microcode_fw,
15394 .collect_cpu_info = collect_cpu_info,
15395@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15396 .microcode_fini_cpu = microcode_fini_cpu,
15397 };
15398
15399-struct microcode_ops * __init init_intel_microcode(void)
15400+const struct microcode_ops * __init init_intel_microcode(void)
15401 {
15402 return &microcode_intel_ops;
15403 }
15404diff -urNp linux-2.6.32.42/arch/x86/kernel/module.c linux-2.6.32.42/arch/x86/kernel/module.c
15405--- linux-2.6.32.42/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15406+++ linux-2.6.32.42/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15407@@ -34,7 +34,7 @@
15408 #define DEBUGP(fmt...)
15409 #endif
15410
15411-void *module_alloc(unsigned long size)
15412+static void *__module_alloc(unsigned long size, pgprot_t prot)
15413 {
15414 struct vm_struct *area;
15415
15416@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15417 if (!area)
15418 return NULL;
15419
15420- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15421- PAGE_KERNEL_EXEC);
15422+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15423+}
15424+
15425+void *module_alloc(unsigned long size)
15426+{
15427+
15428+#ifdef CONFIG_PAX_KERNEXEC
15429+ return __module_alloc(size, PAGE_KERNEL);
15430+#else
15431+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15432+#endif
15433+
15434 }
15435
15436 /* Free memory returned from module_alloc */
15437@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15438 vfree(module_region);
15439 }
15440
15441+#ifdef CONFIG_PAX_KERNEXEC
15442+#ifdef CONFIG_X86_32
15443+void *module_alloc_exec(unsigned long size)
15444+{
15445+ struct vm_struct *area;
15446+
15447+ if (size == 0)
15448+ return NULL;
15449+
15450+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15451+ return area ? area->addr : NULL;
15452+}
15453+EXPORT_SYMBOL(module_alloc_exec);
15454+
15455+void module_free_exec(struct module *mod, void *module_region)
15456+{
15457+ vunmap(module_region);
15458+}
15459+EXPORT_SYMBOL(module_free_exec);
15460+#else
15461+void module_free_exec(struct module *mod, void *module_region)
15462+{
15463+ module_free(mod, module_region);
15464+}
15465+EXPORT_SYMBOL(module_free_exec);
15466+
15467+void *module_alloc_exec(unsigned long size)
15468+{
15469+ return __module_alloc(size, PAGE_KERNEL_RX);
15470+}
15471+EXPORT_SYMBOL(module_alloc_exec);
15472+#endif
15473+#endif
15474+
15475 /* We don't need anything special. */
15476 int module_frob_arch_sections(Elf_Ehdr *hdr,
15477 Elf_Shdr *sechdrs,
15478@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15479 unsigned int i;
15480 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15481 Elf32_Sym *sym;
15482- uint32_t *location;
15483+ uint32_t *plocation, location;
15484
15485 DEBUGP("Applying relocate section %u to %u\n", relsec,
15486 sechdrs[relsec].sh_info);
15487 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15488 /* This is where to make the change */
15489- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15490- + rel[i].r_offset;
15491+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15492+ location = (uint32_t)plocation;
15493+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15494+ plocation = ktla_ktva((void *)plocation);
15495 /* This is the symbol it is referring to. Note that all
15496 undefined symbols have been resolved. */
15497 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15498@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15499 switch (ELF32_R_TYPE(rel[i].r_info)) {
15500 case R_386_32:
15501 /* We add the value into the location given */
15502- *location += sym->st_value;
15503+ pax_open_kernel();
15504+ *plocation += sym->st_value;
15505+ pax_close_kernel();
15506 break;
15507 case R_386_PC32:
15508 /* Add the value, subtract its postition */
15509- *location += sym->st_value - (uint32_t)location;
15510+ pax_open_kernel();
15511+ *plocation += sym->st_value - location;
15512+ pax_close_kernel();
15513 break;
15514 default:
15515 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15516@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15517 case R_X86_64_NONE:
15518 break;
15519 case R_X86_64_64:
15520+ pax_open_kernel();
15521 *(u64 *)loc = val;
15522+ pax_close_kernel();
15523 break;
15524 case R_X86_64_32:
15525+ pax_open_kernel();
15526 *(u32 *)loc = val;
15527+ pax_close_kernel();
15528 if (val != *(u32 *)loc)
15529 goto overflow;
15530 break;
15531 case R_X86_64_32S:
15532+ pax_open_kernel();
15533 *(s32 *)loc = val;
15534+ pax_close_kernel();
15535 if ((s64)val != *(s32 *)loc)
15536 goto overflow;
15537 break;
15538 case R_X86_64_PC32:
15539 val -= (u64)loc;
15540+ pax_open_kernel();
15541 *(u32 *)loc = val;
15542+ pax_close_kernel();
15543+
15544 #if 0
15545 if ((s64)val != *(s32 *)loc)
15546 goto overflow;
15547diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt.c linux-2.6.32.42/arch/x86/kernel/paravirt.c
15548--- linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15549+++ linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15550@@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15551 * corresponding structure. */
15552 static void *get_call_destination(u8 type)
15553 {
15554- struct paravirt_patch_template tmpl = {
15555+ const struct paravirt_patch_template tmpl = {
15556 .pv_init_ops = pv_init_ops,
15557 .pv_time_ops = pv_time_ops,
15558 .pv_cpu_ops = pv_cpu_ops,
15559@@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15560 .pv_lock_ops = pv_lock_ops,
15561 #endif
15562 };
15563+
15564+ pax_track_stack();
15565+
15566 return *((void **)&tmpl + type);
15567 }
15568
15569@@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15570 if (opfunc == NULL)
15571 /* If there's no function, patch it with a ud2a (BUG) */
15572 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15573- else if (opfunc == _paravirt_nop)
15574+ else if (opfunc == (void *)_paravirt_nop)
15575 /* If the operation is a nop, then nop the callsite */
15576 ret = paravirt_patch_nop();
15577
15578 /* identity functions just return their single argument */
15579- else if (opfunc == _paravirt_ident_32)
15580+ else if (opfunc == (void *)_paravirt_ident_32)
15581 ret = paravirt_patch_ident_32(insnbuf, len);
15582- else if (opfunc == _paravirt_ident_64)
15583+ else if (opfunc == (void *)_paravirt_ident_64)
15584 ret = paravirt_patch_ident_64(insnbuf, len);
15585
15586 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15587@@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15588 if (insn_len > len || start == NULL)
15589 insn_len = len;
15590 else
15591- memcpy(insnbuf, start, insn_len);
15592+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15593
15594 return insn_len;
15595 }
15596@@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15597 preempt_enable();
15598 }
15599
15600-struct pv_info pv_info = {
15601+struct pv_info pv_info __read_only = {
15602 .name = "bare hardware",
15603 .paravirt_enabled = 0,
15604 .kernel_rpl = 0,
15605 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15606 };
15607
15608-struct pv_init_ops pv_init_ops = {
15609+struct pv_init_ops pv_init_ops __read_only = {
15610 .patch = native_patch,
15611 };
15612
15613-struct pv_time_ops pv_time_ops = {
15614+struct pv_time_ops pv_time_ops __read_only = {
15615 .sched_clock = native_sched_clock,
15616 };
15617
15618-struct pv_irq_ops pv_irq_ops = {
15619+struct pv_irq_ops pv_irq_ops __read_only = {
15620 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15621 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15622 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15623@@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15624 #endif
15625 };
15626
15627-struct pv_cpu_ops pv_cpu_ops = {
15628+struct pv_cpu_ops pv_cpu_ops __read_only = {
15629 .cpuid = native_cpuid,
15630 .get_debugreg = native_get_debugreg,
15631 .set_debugreg = native_set_debugreg,
15632@@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15633 .end_context_switch = paravirt_nop,
15634 };
15635
15636-struct pv_apic_ops pv_apic_ops = {
15637+struct pv_apic_ops pv_apic_ops __read_only = {
15638 #ifdef CONFIG_X86_LOCAL_APIC
15639 .startup_ipi_hook = paravirt_nop,
15640 #endif
15641@@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15642 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15643 #endif
15644
15645-struct pv_mmu_ops pv_mmu_ops = {
15646+struct pv_mmu_ops pv_mmu_ops __read_only = {
15647
15648 .read_cr2 = native_read_cr2,
15649 .write_cr2 = native_write_cr2,
15650@@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15651 },
15652
15653 .set_fixmap = native_set_fixmap,
15654+
15655+#ifdef CONFIG_PAX_KERNEXEC
15656+ .pax_open_kernel = native_pax_open_kernel,
15657+ .pax_close_kernel = native_pax_close_kernel,
15658+#endif
15659+
15660 };
15661
15662 EXPORT_SYMBOL_GPL(pv_time_ops);
15663diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c
15664--- linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15665+++ linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15666@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15667 __raw_spin_lock(lock);
15668 }
15669
15670-struct pv_lock_ops pv_lock_ops = {
15671+struct pv_lock_ops pv_lock_ops __read_only = {
15672 #ifdef CONFIG_SMP
15673 .spin_is_locked = __ticket_spin_is_locked,
15674 .spin_is_contended = __ticket_spin_is_contended,
15675diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c
15676--- linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15677+++ linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15678@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15679 free_pages((unsigned long)vaddr, get_order(size));
15680 }
15681
15682-static struct dma_map_ops calgary_dma_ops = {
15683+static const struct dma_map_ops calgary_dma_ops = {
15684 .alloc_coherent = calgary_alloc_coherent,
15685 .free_coherent = calgary_free_coherent,
15686 .map_sg = calgary_map_sg,
15687diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-dma.c linux-2.6.32.42/arch/x86/kernel/pci-dma.c
15688--- linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15689+++ linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15690@@ -14,7 +14,7 @@
15691
15692 static int forbid_dac __read_mostly;
15693
15694-struct dma_map_ops *dma_ops;
15695+const struct dma_map_ops *dma_ops;
15696 EXPORT_SYMBOL(dma_ops);
15697
15698 static int iommu_sac_force __read_mostly;
15699@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15700
15701 int dma_supported(struct device *dev, u64 mask)
15702 {
15703- struct dma_map_ops *ops = get_dma_ops(dev);
15704+ const struct dma_map_ops *ops = get_dma_ops(dev);
15705
15706 #ifdef CONFIG_PCI
15707 if (mask > 0xffffffff && forbid_dac > 0) {
15708diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c
15709--- linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15710+++ linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15711@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15712 return -1;
15713 }
15714
15715-static struct dma_map_ops gart_dma_ops = {
15716+static const struct dma_map_ops gart_dma_ops = {
15717 .map_sg = gart_map_sg,
15718 .unmap_sg = gart_unmap_sg,
15719 .map_page = gart_map_page,
15720diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-nommu.c linux-2.6.32.42/arch/x86/kernel/pci-nommu.c
15721--- linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15722+++ linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15723@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15724 flush_write_buffers();
15725 }
15726
15727-struct dma_map_ops nommu_dma_ops = {
15728+const struct dma_map_ops nommu_dma_ops = {
15729 .alloc_coherent = dma_generic_alloc_coherent,
15730 .free_coherent = nommu_free_coherent,
15731 .map_sg = nommu_map_sg,
15732diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c
15733--- linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15734+++ linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15735@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15736 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15737 }
15738
15739-static struct dma_map_ops swiotlb_dma_ops = {
15740+static const struct dma_map_ops swiotlb_dma_ops = {
15741 .mapping_error = swiotlb_dma_mapping_error,
15742 .alloc_coherent = x86_swiotlb_alloc_coherent,
15743 .free_coherent = swiotlb_free_coherent,
15744diff -urNp linux-2.6.32.42/arch/x86/kernel/process_32.c linux-2.6.32.42/arch/x86/kernel/process_32.c
15745--- linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15746+++ linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15747@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15748 unsigned long thread_saved_pc(struct task_struct *tsk)
15749 {
15750 return ((unsigned long *)tsk->thread.sp)[3];
15751+//XXX return tsk->thread.eip;
15752 }
15753
15754 #ifndef CONFIG_SMP
15755@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15756 unsigned short ss, gs;
15757 const char *board;
15758
15759- if (user_mode_vm(regs)) {
15760+ if (user_mode(regs)) {
15761 sp = regs->sp;
15762 ss = regs->ss & 0xffff;
15763- gs = get_user_gs(regs);
15764 } else {
15765 sp = (unsigned long) (&regs->sp);
15766 savesegment(ss, ss);
15767- savesegment(gs, gs);
15768 }
15769+ gs = get_user_gs(regs);
15770
15771 printk("\n");
15772
15773@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15774 regs.bx = (unsigned long) fn;
15775 regs.dx = (unsigned long) arg;
15776
15777- regs.ds = __USER_DS;
15778- regs.es = __USER_DS;
15779+ regs.ds = __KERNEL_DS;
15780+ regs.es = __KERNEL_DS;
15781 regs.fs = __KERNEL_PERCPU;
15782- regs.gs = __KERNEL_STACK_CANARY;
15783+ savesegment(gs, regs.gs);
15784 regs.orig_ax = -1;
15785 regs.ip = (unsigned long) kernel_thread_helper;
15786 regs.cs = __KERNEL_CS | get_kernel_rpl();
15787@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15788 struct task_struct *tsk;
15789 int err;
15790
15791- childregs = task_pt_regs(p);
15792+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15793 *childregs = *regs;
15794 childregs->ax = 0;
15795 childregs->sp = sp;
15796
15797 p->thread.sp = (unsigned long) childregs;
15798 p->thread.sp0 = (unsigned long) (childregs+1);
15799+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15800
15801 p->thread.ip = (unsigned long) ret_from_fork;
15802
15803@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15804 struct thread_struct *prev = &prev_p->thread,
15805 *next = &next_p->thread;
15806 int cpu = smp_processor_id();
15807- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15808+ struct tss_struct *tss = init_tss + cpu;
15809 bool preload_fpu;
15810
15811 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15812@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15813 */
15814 lazy_save_gs(prev->gs);
15815
15816+#ifdef CONFIG_PAX_MEMORY_UDEREF
15817+ __set_fs(task_thread_info(next_p)->addr_limit);
15818+#endif
15819+
15820 /*
15821 * Load the per-thread Thread-Local Storage descriptor.
15822 */
15823@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15824 */
15825 arch_end_context_switch(next_p);
15826
15827+ percpu_write(current_task, next_p);
15828+ percpu_write(current_tinfo, &next_p->tinfo);
15829+
15830 if (preload_fpu)
15831 __math_state_restore();
15832
15833@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15834 if (prev->gs | next->gs)
15835 lazy_load_gs(next->gs);
15836
15837- percpu_write(current_task, next_p);
15838-
15839 return prev_p;
15840 }
15841
15842@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15843 } while (count++ < 16);
15844 return 0;
15845 }
15846-
15847diff -urNp linux-2.6.32.42/arch/x86/kernel/process_64.c linux-2.6.32.42/arch/x86/kernel/process_64.c
15848--- linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15849+++ linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15850@@ -91,7 +91,7 @@ static void __exit_idle(void)
15851 void exit_idle(void)
15852 {
15853 /* idle loop has pid 0 */
15854- if (current->pid)
15855+ if (task_pid_nr(current))
15856 return;
15857 __exit_idle();
15858 }
15859@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15860 if (!board)
15861 board = "";
15862 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15863- current->pid, current->comm, print_tainted(),
15864+ task_pid_nr(current), current->comm, print_tainted(),
15865 init_utsname()->release,
15866 (int)strcspn(init_utsname()->version, " "),
15867 init_utsname()->version, board);
15868@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15869 struct pt_regs *childregs;
15870 struct task_struct *me = current;
15871
15872- childregs = ((struct pt_regs *)
15873- (THREAD_SIZE + task_stack_page(p))) - 1;
15874+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15875 *childregs = *regs;
15876
15877 childregs->ax = 0;
15878@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15879 p->thread.sp = (unsigned long) childregs;
15880 p->thread.sp0 = (unsigned long) (childregs+1);
15881 p->thread.usersp = me->thread.usersp;
15882+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15883
15884 set_tsk_thread_flag(p, TIF_FORK);
15885
15886@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15887 struct thread_struct *prev = &prev_p->thread;
15888 struct thread_struct *next = &next_p->thread;
15889 int cpu = smp_processor_id();
15890- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15891+ struct tss_struct *tss = init_tss + cpu;
15892 unsigned fsindex, gsindex;
15893 bool preload_fpu;
15894
15895@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15896 prev->usersp = percpu_read(old_rsp);
15897 percpu_write(old_rsp, next->usersp);
15898 percpu_write(current_task, next_p);
15899+ percpu_write(current_tinfo, &next_p->tinfo);
15900
15901- percpu_write(kernel_stack,
15902- (unsigned long)task_stack_page(next_p) +
15903- THREAD_SIZE - KERNEL_STACK_OFFSET);
15904+ percpu_write(kernel_stack, next->sp0);
15905
15906 /*
15907 * Now maybe reload the debug registers and handle I/O bitmaps
15908@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15909 if (!p || p == current || p->state == TASK_RUNNING)
15910 return 0;
15911 stack = (unsigned long)task_stack_page(p);
15912- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15913+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15914 return 0;
15915 fp = *(u64 *)(p->thread.sp);
15916 do {
15917- if (fp < (unsigned long)stack ||
15918- fp >= (unsigned long)stack+THREAD_SIZE)
15919+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15920 return 0;
15921 ip = *(u64 *)(fp+8);
15922 if (!in_sched_functions(ip))
15923diff -urNp linux-2.6.32.42/arch/x86/kernel/process.c linux-2.6.32.42/arch/x86/kernel/process.c
15924--- linux-2.6.32.42/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15925+++ linux-2.6.32.42/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15926@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15927
15928 void free_thread_info(struct thread_info *ti)
15929 {
15930- free_thread_xstate(ti->task);
15931 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15932 }
15933
15934+static struct kmem_cache *task_struct_cachep;
15935+
15936 void arch_task_cache_init(void)
15937 {
15938- task_xstate_cachep =
15939- kmem_cache_create("task_xstate", xstate_size,
15940+ /* create a slab on which task_structs can be allocated */
15941+ task_struct_cachep =
15942+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15943+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15944+
15945+ task_xstate_cachep =
15946+ kmem_cache_create("task_xstate", xstate_size,
15947 __alignof__(union thread_xstate),
15948- SLAB_PANIC | SLAB_NOTRACK, NULL);
15949+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15950+}
15951+
15952+struct task_struct *alloc_task_struct(void)
15953+{
15954+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15955+}
15956+
15957+void free_task_struct(struct task_struct *task)
15958+{
15959+ free_thread_xstate(task);
15960+ kmem_cache_free(task_struct_cachep, task);
15961 }
15962
15963 /*
15964@@ -73,7 +90,7 @@ void exit_thread(void)
15965 unsigned long *bp = t->io_bitmap_ptr;
15966
15967 if (bp) {
15968- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15969+ struct tss_struct *tss = init_tss + get_cpu();
15970
15971 t->io_bitmap_ptr = NULL;
15972 clear_thread_flag(TIF_IO_BITMAP);
15973@@ -93,6 +110,9 @@ void flush_thread(void)
15974
15975 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15976
15977+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15978+ loadsegment(gs, 0);
15979+#endif
15980 tsk->thread.debugreg0 = 0;
15981 tsk->thread.debugreg1 = 0;
15982 tsk->thread.debugreg2 = 0;
15983@@ -307,7 +327,7 @@ void default_idle(void)
15984 EXPORT_SYMBOL(default_idle);
15985 #endif
15986
15987-void stop_this_cpu(void *dummy)
15988+__noreturn void stop_this_cpu(void *dummy)
15989 {
15990 local_irq_disable();
15991 /*
15992@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15993 }
15994 early_param("idle", idle_setup);
15995
15996-unsigned long arch_align_stack(unsigned long sp)
15997+#ifdef CONFIG_PAX_RANDKSTACK
15998+asmlinkage void pax_randomize_kstack(void)
15999 {
16000- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16001- sp -= get_random_int() % 8192;
16002- return sp & ~0xf;
16003-}
16004+ struct thread_struct *thread = &current->thread;
16005+ unsigned long time;
16006
16007-unsigned long arch_randomize_brk(struct mm_struct *mm)
16008-{
16009- unsigned long range_end = mm->brk + 0x02000000;
16010- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16011+ if (!randomize_va_space)
16012+ return;
16013+
16014+ rdtscl(time);
16015+
16016+ /* P4 seems to return a 0 LSB, ignore it */
16017+#ifdef CONFIG_MPENTIUM4
16018+ time &= 0x3EUL;
16019+ time <<= 2;
16020+#elif defined(CONFIG_X86_64)
16021+ time &= 0xFUL;
16022+ time <<= 4;
16023+#else
16024+ time &= 0x1FUL;
16025+ time <<= 3;
16026+#endif
16027+
16028+ thread->sp0 ^= time;
16029+ load_sp0(init_tss + smp_processor_id(), thread);
16030+
16031+#ifdef CONFIG_X86_64
16032+ percpu_write(kernel_stack, thread->sp0);
16033+#endif
16034 }
16035+#endif
16036
16037diff -urNp linux-2.6.32.42/arch/x86/kernel/ptrace.c linux-2.6.32.42/arch/x86/kernel/ptrace.c
16038--- linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16039+++ linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16040@@ -925,7 +925,7 @@ static const struct user_regset_view use
16041 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16042 {
16043 int ret;
16044- unsigned long __user *datap = (unsigned long __user *)data;
16045+ unsigned long __user *datap = (__force unsigned long __user *)data;
16046
16047 switch (request) {
16048 /* read the word at location addr in the USER area. */
16049@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16050 if (addr < 0)
16051 return -EIO;
16052 ret = do_get_thread_area(child, addr,
16053- (struct user_desc __user *) data);
16054+ (__force struct user_desc __user *) data);
16055 break;
16056
16057 case PTRACE_SET_THREAD_AREA:
16058 if (addr < 0)
16059 return -EIO;
16060 ret = do_set_thread_area(child, addr,
16061- (struct user_desc __user *) data, 0);
16062+ (__force struct user_desc __user *) data, 0);
16063 break;
16064 #endif
16065
16066@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16067 #ifdef CONFIG_X86_PTRACE_BTS
16068 case PTRACE_BTS_CONFIG:
16069 ret = ptrace_bts_config
16070- (child, data, (struct ptrace_bts_config __user *)addr);
16071+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16072 break;
16073
16074 case PTRACE_BTS_STATUS:
16075 ret = ptrace_bts_status
16076- (child, data, (struct ptrace_bts_config __user *)addr);
16077+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16078 break;
16079
16080 case PTRACE_BTS_SIZE:
16081@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16082
16083 case PTRACE_BTS_GET:
16084 ret = ptrace_bts_read_record
16085- (child, data, (struct bts_struct __user *) addr);
16086+ (child, data, (__force struct bts_struct __user *) addr);
16087 break;
16088
16089 case PTRACE_BTS_CLEAR:
16090@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16091
16092 case PTRACE_BTS_DRAIN:
16093 ret = ptrace_bts_drain
16094- (child, data, (struct bts_struct __user *) addr);
16095+ (child, data, (__force struct bts_struct __user *) addr);
16096 break;
16097 #endif /* CONFIG_X86_PTRACE_BTS */
16098
16099@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16100 info.si_code = si_code;
16101
16102 /* User-mode ip? */
16103- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16104+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16105
16106 /* Send us the fake SIGTRAP */
16107 force_sig_info(SIGTRAP, &info, tsk);
16108@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16109 * We must return the syscall number to actually look up in the table.
16110 * This can be -1L to skip running any syscall at all.
16111 */
16112-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16113+long syscall_trace_enter(struct pt_regs *regs)
16114 {
16115 long ret = 0;
16116
16117@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16118 return ret ?: regs->orig_ax;
16119 }
16120
16121-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16122+void syscall_trace_leave(struct pt_regs *regs)
16123 {
16124 if (unlikely(current->audit_context))
16125 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16126diff -urNp linux-2.6.32.42/arch/x86/kernel/reboot.c linux-2.6.32.42/arch/x86/kernel/reboot.c
16127--- linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16128+++ linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16129@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16130 EXPORT_SYMBOL(pm_power_off);
16131
16132 static const struct desc_ptr no_idt = {};
16133-static int reboot_mode;
16134+static unsigned short reboot_mode;
16135 enum reboot_type reboot_type = BOOT_KBD;
16136 int reboot_force;
16137
16138@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16139 controller to pulse the CPU reset line, which is more thorough, but
16140 doesn't work with at least one type of 486 motherboard. It is easy
16141 to stop this code working; hence the copious comments. */
16142-static const unsigned long long
16143-real_mode_gdt_entries [3] =
16144+static struct desc_struct
16145+real_mode_gdt_entries [3] __read_only =
16146 {
16147- 0x0000000000000000ULL, /* Null descriptor */
16148- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16149- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16150+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16151+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16152+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16153 };
16154
16155 static const struct desc_ptr
16156@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16157 * specified by the code and length parameters.
16158 * We assume that length will aways be less that 100!
16159 */
16160-void machine_real_restart(const unsigned char *code, int length)
16161+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16162 {
16163 local_irq_disable();
16164
16165@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16166 /* Remap the kernel at virtual address zero, as well as offset zero
16167 from the kernel segment. This assumes the kernel segment starts at
16168 virtual address PAGE_OFFSET. */
16169- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16170- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16171+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16172+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16173
16174 /*
16175 * Use `swapper_pg_dir' as our page directory.
16176@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16177 boot)". This seems like a fairly standard thing that gets set by
16178 REBOOT.COM programs, and the previous reset routine did this
16179 too. */
16180- *((unsigned short *)0x472) = reboot_mode;
16181+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16182
16183 /* For the switch to real mode, copy some code to low memory. It has
16184 to be in the first 64k because it is running in 16-bit mode, and it
16185 has to have the same physical and virtual address, because it turns
16186 off paging. Copy it near the end of the first page, out of the way
16187 of BIOS variables. */
16188- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16189- real_mode_switch, sizeof (real_mode_switch));
16190- memcpy((void *)(0x1000 - 100), code, length);
16191+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16192+ memcpy(__va(0x1000 - 100), code, length);
16193
16194 /* Set up the IDT for real mode. */
16195 load_idt(&real_mode_idt);
16196@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16197 __asm__ __volatile__ ("ljmp $0x0008,%0"
16198 :
16199 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16200+ do { } while (1);
16201 }
16202 #ifdef CONFIG_APM_MODULE
16203 EXPORT_SYMBOL(machine_real_restart);
16204@@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16205 {
16206 }
16207
16208-static void native_machine_emergency_restart(void)
16209+__noreturn static void native_machine_emergency_restart(void)
16210 {
16211 int i;
16212
16213@@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16214 #endif
16215 }
16216
16217-static void __machine_emergency_restart(int emergency)
16218+static __noreturn void __machine_emergency_restart(int emergency)
16219 {
16220 reboot_emergency = emergency;
16221 machine_ops.emergency_restart();
16222 }
16223
16224-static void native_machine_restart(char *__unused)
16225+static __noreturn void native_machine_restart(char *__unused)
16226 {
16227 printk("machine restart\n");
16228
16229@@ -666,7 +666,7 @@ static void native_machine_restart(char
16230 __machine_emergency_restart(0);
16231 }
16232
16233-static void native_machine_halt(void)
16234+static __noreturn void native_machine_halt(void)
16235 {
16236 /* stop other cpus and apics */
16237 machine_shutdown();
16238@@ -677,7 +677,7 @@ static void native_machine_halt(void)
16239 stop_this_cpu(NULL);
16240 }
16241
16242-static void native_machine_power_off(void)
16243+__noreturn static void native_machine_power_off(void)
16244 {
16245 if (pm_power_off) {
16246 if (!reboot_force)
16247@@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16248 }
16249 /* a fallback in case there is no PM info available */
16250 tboot_shutdown(TB_SHUTDOWN_HALT);
16251+ do { } while (1);
16252 }
16253
16254 struct machine_ops machine_ops = {
16255diff -urNp linux-2.6.32.42/arch/x86/kernel/setup.c linux-2.6.32.42/arch/x86/kernel/setup.c
16256--- linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16257+++ linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16258@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16259
16260 if (!boot_params.hdr.root_flags)
16261 root_mountflags &= ~MS_RDONLY;
16262- init_mm.start_code = (unsigned long) _text;
16263- init_mm.end_code = (unsigned long) _etext;
16264+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16265+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16266 init_mm.end_data = (unsigned long) _edata;
16267 init_mm.brk = _brk_end;
16268
16269- code_resource.start = virt_to_phys(_text);
16270- code_resource.end = virt_to_phys(_etext)-1;
16271- data_resource.start = virt_to_phys(_etext);
16272+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16273+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16274+ data_resource.start = virt_to_phys(_sdata);
16275 data_resource.end = virt_to_phys(_edata)-1;
16276 bss_resource.start = virt_to_phys(&__bss_start);
16277 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16278diff -urNp linux-2.6.32.42/arch/x86/kernel/setup_percpu.c linux-2.6.32.42/arch/x86/kernel/setup_percpu.c
16279--- linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16280+++ linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16281@@ -25,19 +25,17 @@
16282 # define DBG(x...)
16283 #endif
16284
16285-DEFINE_PER_CPU(int, cpu_number);
16286+#ifdef CONFIG_SMP
16287+DEFINE_PER_CPU(unsigned int, cpu_number);
16288 EXPORT_PER_CPU_SYMBOL(cpu_number);
16289+#endif
16290
16291-#ifdef CONFIG_X86_64
16292 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16293-#else
16294-#define BOOT_PERCPU_OFFSET 0
16295-#endif
16296
16297 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16298 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16299
16300-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16301+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16302 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16303 };
16304 EXPORT_SYMBOL(__per_cpu_offset);
16305@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16306 {
16307 #ifdef CONFIG_X86_32
16308 struct desc_struct gdt;
16309+ unsigned long base = per_cpu_offset(cpu);
16310
16311- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16312- 0x2 | DESCTYPE_S, 0x8);
16313- gdt.s = 1;
16314+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16315+ 0x83 | DESCTYPE_S, 0xC);
16316 write_gdt_entry(get_cpu_gdt_table(cpu),
16317 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16318 #endif
16319@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16320 /* alrighty, percpu areas up and running */
16321 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16322 for_each_possible_cpu(cpu) {
16323+#ifdef CONFIG_CC_STACKPROTECTOR
16324+#ifdef CONFIG_X86_32
16325+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16326+#endif
16327+#endif
16328 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16329 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16330 per_cpu(cpu_number, cpu) = cpu;
16331@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16332 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16333 #endif
16334 #endif
16335+#ifdef CONFIG_CC_STACKPROTECTOR
16336+#ifdef CONFIG_X86_32
16337+ if (!cpu)
16338+ per_cpu(stack_canary.canary, cpu) = canary;
16339+#endif
16340+#endif
16341 /*
16342 * Up to this point, the boot CPU has been using .data.init
16343 * area. Reload any changed state for the boot CPU.
16344diff -urNp linux-2.6.32.42/arch/x86/kernel/signal.c linux-2.6.32.42/arch/x86/kernel/signal.c
16345--- linux-2.6.32.42/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16346+++ linux-2.6.32.42/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16347@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16348 * Align the stack pointer according to the i386 ABI,
16349 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16350 */
16351- sp = ((sp + 4) & -16ul) - 4;
16352+ sp = ((sp - 12) & -16ul) - 4;
16353 #else /* !CONFIG_X86_32 */
16354 sp = round_down(sp, 16) - 8;
16355 #endif
16356@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16357 * Return an always-bogus address instead so we will die with SIGSEGV.
16358 */
16359 if (onsigstack && !likely(on_sig_stack(sp)))
16360- return (void __user *)-1L;
16361+ return (__force void __user *)-1L;
16362
16363 /* save i387 state */
16364 if (used_math() && save_i387_xstate(*fpstate) < 0)
16365- return (void __user *)-1L;
16366+ return (__force void __user *)-1L;
16367
16368 return (void __user *)sp;
16369 }
16370@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16371 }
16372
16373 if (current->mm->context.vdso)
16374- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16375+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16376 else
16377- restorer = &frame->retcode;
16378+ restorer = (void __user *)&frame->retcode;
16379 if (ka->sa.sa_flags & SA_RESTORER)
16380 restorer = ka->sa.sa_restorer;
16381
16382@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16383 * reasons and because gdb uses it as a signature to notice
16384 * signal handler stack frames.
16385 */
16386- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16387+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16388
16389 if (err)
16390 return -EFAULT;
16391@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16392 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16393
16394 /* Set up to return from userspace. */
16395- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16396+ if (current->mm->context.vdso)
16397+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16398+ else
16399+ restorer = (void __user *)&frame->retcode;
16400 if (ka->sa.sa_flags & SA_RESTORER)
16401 restorer = ka->sa.sa_restorer;
16402 put_user_ex(restorer, &frame->pretcode);
16403@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16404 * reasons and because gdb uses it as a signature to notice
16405 * signal handler stack frames.
16406 */
16407- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16408+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16409 } put_user_catch(err);
16410
16411 if (err)
16412@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16413 int signr;
16414 sigset_t *oldset;
16415
16416+ pax_track_stack();
16417+
16418 /*
16419 * We want the common case to go fast, which is why we may in certain
16420 * cases get here from kernel mode. Just return without doing anything
16421@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16422 * X86_32: vm86 regs switched out by assembly code before reaching
16423 * here, so testing against kernel CS suffices.
16424 */
16425- if (!user_mode(regs))
16426+ if (!user_mode_novm(regs))
16427 return;
16428
16429 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16430diff -urNp linux-2.6.32.42/arch/x86/kernel/smpboot.c linux-2.6.32.42/arch/x86/kernel/smpboot.c
16431--- linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16432+++ linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16433@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16434 */
16435 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16436
16437-void cpu_hotplug_driver_lock()
16438+void cpu_hotplug_driver_lock(void)
16439 {
16440- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16441+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16442 }
16443
16444-void cpu_hotplug_driver_unlock()
16445+void cpu_hotplug_driver_unlock(void)
16446 {
16447- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16448+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16449 }
16450
16451 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16452@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16453 * target processor state.
16454 */
16455 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16456- (unsigned long)stack_start.sp);
16457+ stack_start);
16458
16459 /*
16460 * Run STARTUP IPI loop.
16461@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16462 set_idle_for_cpu(cpu, c_idle.idle);
16463 do_rest:
16464 per_cpu(current_task, cpu) = c_idle.idle;
16465+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16466 #ifdef CONFIG_X86_32
16467 /* Stack for startup_32 can be just as for start_secondary onwards */
16468 irq_ctx_init(cpu);
16469@@ -750,13 +751,15 @@ do_rest:
16470 #else
16471 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16472 initial_gs = per_cpu_offset(cpu);
16473- per_cpu(kernel_stack, cpu) =
16474- (unsigned long)task_stack_page(c_idle.idle) -
16475- KERNEL_STACK_OFFSET + THREAD_SIZE;
16476+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16477 #endif
16478+
16479+ pax_open_kernel();
16480 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16481+ pax_close_kernel();
16482+
16483 initial_code = (unsigned long)start_secondary;
16484- stack_start.sp = (void *) c_idle.idle->thread.sp;
16485+ stack_start = c_idle.idle->thread.sp;
16486
16487 /* start_ip had better be page-aligned! */
16488 start_ip = setup_trampoline();
16489@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16490
16491 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16492
16493+#ifdef CONFIG_PAX_PER_CPU_PGD
16494+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16495+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16496+ KERNEL_PGD_PTRS);
16497+#endif
16498+
16499 err = do_boot_cpu(apicid, cpu);
16500
16501 if (err) {
16502diff -urNp linux-2.6.32.42/arch/x86/kernel/step.c linux-2.6.32.42/arch/x86/kernel/step.c
16503--- linux-2.6.32.42/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16504+++ linux-2.6.32.42/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16505@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16506 struct desc_struct *desc;
16507 unsigned long base;
16508
16509- seg &= ~7UL;
16510+ seg >>= 3;
16511
16512 mutex_lock(&child->mm->context.lock);
16513- if (unlikely((seg >> 3) >= child->mm->context.size))
16514+ if (unlikely(seg >= child->mm->context.size))
16515 addr = -1L; /* bogus selector, access would fault */
16516 else {
16517 desc = child->mm->context.ldt + seg;
16518@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16519 addr += base;
16520 }
16521 mutex_unlock(&child->mm->context.lock);
16522- }
16523+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16524+ addr = ktla_ktva(addr);
16525
16526 return addr;
16527 }
16528@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16529 unsigned char opcode[15];
16530 unsigned long addr = convert_ip_to_linear(child, regs);
16531
16532+ if (addr == -EINVAL)
16533+ return 0;
16534+
16535 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16536 for (i = 0; i < copied; i++) {
16537 switch (opcode[i]) {
16538@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16539
16540 #ifdef CONFIG_X86_64
16541 case 0x40 ... 0x4f:
16542- if (regs->cs != __USER_CS)
16543+ if ((regs->cs & 0xffff) != __USER_CS)
16544 /* 32-bit mode: register increment */
16545 return 0;
16546 /* 64-bit mode: REX prefix */
16547diff -urNp linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S
16548--- linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16549+++ linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16550@@ -1,3 +1,4 @@
16551+.section .rodata,"a",@progbits
16552 ENTRY(sys_call_table)
16553 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16554 .long sys_exit
16555diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c
16556--- linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16557+++ linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16558@@ -24,6 +24,21 @@
16559
16560 #include <asm/syscalls.h>
16561
16562+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16563+{
16564+ unsigned long pax_task_size = TASK_SIZE;
16565+
16566+#ifdef CONFIG_PAX_SEGMEXEC
16567+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16568+ pax_task_size = SEGMEXEC_TASK_SIZE;
16569+#endif
16570+
16571+ if (len > pax_task_size || addr > pax_task_size - len)
16572+ return -EINVAL;
16573+
16574+ return 0;
16575+}
16576+
16577 /*
16578 * Perform the select(nd, in, out, ex, tv) and mmap() system
16579 * calls. Linux/i386 didn't use to be able to handle more than
16580@@ -58,6 +73,212 @@ out:
16581 return err;
16582 }
16583
16584+unsigned long
16585+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16586+ unsigned long len, unsigned long pgoff, unsigned long flags)
16587+{
16588+ struct mm_struct *mm = current->mm;
16589+ struct vm_area_struct *vma;
16590+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16591+
16592+#ifdef CONFIG_PAX_SEGMEXEC
16593+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16594+ pax_task_size = SEGMEXEC_TASK_SIZE;
16595+#endif
16596+
16597+ pax_task_size -= PAGE_SIZE;
16598+
16599+ if (len > pax_task_size)
16600+ return -ENOMEM;
16601+
16602+ if (flags & MAP_FIXED)
16603+ return addr;
16604+
16605+#ifdef CONFIG_PAX_RANDMMAP
16606+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16607+#endif
16608+
16609+ if (addr) {
16610+ addr = PAGE_ALIGN(addr);
16611+ if (pax_task_size - len >= addr) {
16612+ vma = find_vma(mm, addr);
16613+ if (check_heap_stack_gap(vma, addr, len))
16614+ return addr;
16615+ }
16616+ }
16617+ if (len > mm->cached_hole_size) {
16618+ start_addr = addr = mm->free_area_cache;
16619+ } else {
16620+ start_addr = addr = mm->mmap_base;
16621+ mm->cached_hole_size = 0;
16622+ }
16623+
16624+#ifdef CONFIG_PAX_PAGEEXEC
16625+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16626+ start_addr = 0x00110000UL;
16627+
16628+#ifdef CONFIG_PAX_RANDMMAP
16629+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16630+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16631+#endif
16632+
16633+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16634+ start_addr = addr = mm->mmap_base;
16635+ else
16636+ addr = start_addr;
16637+ }
16638+#endif
16639+
16640+full_search:
16641+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16642+ /* At this point: (!vma || addr < vma->vm_end). */
16643+ if (pax_task_size - len < addr) {
16644+ /*
16645+ * Start a new search - just in case we missed
16646+ * some holes.
16647+ */
16648+ if (start_addr != mm->mmap_base) {
16649+ start_addr = addr = mm->mmap_base;
16650+ mm->cached_hole_size = 0;
16651+ goto full_search;
16652+ }
16653+ return -ENOMEM;
16654+ }
16655+ if (check_heap_stack_gap(vma, addr, len))
16656+ break;
16657+ if (addr + mm->cached_hole_size < vma->vm_start)
16658+ mm->cached_hole_size = vma->vm_start - addr;
16659+ addr = vma->vm_end;
16660+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16661+ start_addr = addr = mm->mmap_base;
16662+ mm->cached_hole_size = 0;
16663+ goto full_search;
16664+ }
16665+ }
16666+
16667+ /*
16668+ * Remember the place where we stopped the search:
16669+ */
16670+ mm->free_area_cache = addr + len;
16671+ return addr;
16672+}
16673+
16674+unsigned long
16675+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16676+ const unsigned long len, const unsigned long pgoff,
16677+ const unsigned long flags)
16678+{
16679+ struct vm_area_struct *vma;
16680+ struct mm_struct *mm = current->mm;
16681+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16682+
16683+#ifdef CONFIG_PAX_SEGMEXEC
16684+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16685+ pax_task_size = SEGMEXEC_TASK_SIZE;
16686+#endif
16687+
16688+ pax_task_size -= PAGE_SIZE;
16689+
16690+ /* requested length too big for entire address space */
16691+ if (len > pax_task_size)
16692+ return -ENOMEM;
16693+
16694+ if (flags & MAP_FIXED)
16695+ return addr;
16696+
16697+#ifdef CONFIG_PAX_PAGEEXEC
16698+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16699+ goto bottomup;
16700+#endif
16701+
16702+#ifdef CONFIG_PAX_RANDMMAP
16703+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16704+#endif
16705+
16706+ /* requesting a specific address */
16707+ if (addr) {
16708+ addr = PAGE_ALIGN(addr);
16709+ if (pax_task_size - len >= addr) {
16710+ vma = find_vma(mm, addr);
16711+ if (check_heap_stack_gap(vma, addr, len))
16712+ return addr;
16713+ }
16714+ }
16715+
16716+ /* check if free_area_cache is useful for us */
16717+ if (len <= mm->cached_hole_size) {
16718+ mm->cached_hole_size = 0;
16719+ mm->free_area_cache = mm->mmap_base;
16720+ }
16721+
16722+ /* either no address requested or can't fit in requested address hole */
16723+ addr = mm->free_area_cache;
16724+
16725+ /* make sure it can fit in the remaining address space */
16726+ if (addr > len) {
16727+ vma = find_vma(mm, addr-len);
16728+ if (check_heap_stack_gap(vma, addr - len, len))
16729+ /* remember the address as a hint for next time */
16730+ return (mm->free_area_cache = addr-len);
16731+ }
16732+
16733+ if (mm->mmap_base < len)
16734+ goto bottomup;
16735+
16736+ addr = mm->mmap_base-len;
16737+
16738+ do {
16739+ /*
16740+ * Lookup failure means no vma is above this address,
16741+ * else if new region fits below vma->vm_start,
16742+ * return with success:
16743+ */
16744+ vma = find_vma(mm, addr);
16745+ if (check_heap_stack_gap(vma, addr, len))
16746+ /* remember the address as a hint for next time */
16747+ return (mm->free_area_cache = addr);
16748+
16749+ /* remember the largest hole we saw so far */
16750+ if (addr + mm->cached_hole_size < vma->vm_start)
16751+ mm->cached_hole_size = vma->vm_start - addr;
16752+
16753+ /* try just below the current vma->vm_start */
16754+ addr = skip_heap_stack_gap(vma, len);
16755+ } while (!IS_ERR_VALUE(addr));
16756+
16757+bottomup:
16758+ /*
16759+ * A failed mmap() very likely causes application failure,
16760+ * so fall back to the bottom-up function here. This scenario
16761+ * can happen with large stack limits and large mmap()
16762+ * allocations.
16763+ */
16764+
16765+#ifdef CONFIG_PAX_SEGMEXEC
16766+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16767+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16768+ else
16769+#endif
16770+
16771+ mm->mmap_base = TASK_UNMAPPED_BASE;
16772+
16773+#ifdef CONFIG_PAX_RANDMMAP
16774+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16775+ mm->mmap_base += mm->delta_mmap;
16776+#endif
16777+
16778+ mm->free_area_cache = mm->mmap_base;
16779+ mm->cached_hole_size = ~0UL;
16780+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16781+ /*
16782+ * Restore the topdown base:
16783+ */
16784+ mm->mmap_base = base;
16785+ mm->free_area_cache = base;
16786+ mm->cached_hole_size = ~0UL;
16787+
16788+ return addr;
16789+}
16790
16791 struct sel_arg_struct {
16792 unsigned long n;
16793@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16794 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16795 case SEMTIMEDOP:
16796 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16797- (const struct timespec __user *)fifth);
16798+ (__force const struct timespec __user *)fifth);
16799
16800 case SEMGET:
16801 return sys_semget(first, second, third);
16802@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16803 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16804 if (ret)
16805 return ret;
16806- return put_user(raddr, (ulong __user *) third);
16807+ return put_user(raddr, (__force ulong __user *) third);
16808 }
16809 case 1: /* iBCS2 emulator entry point */
16810 if (!segment_eq(get_fs(), get_ds()))
16811@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16812
16813 return error;
16814 }
16815-
16816-
16817-/*
16818- * Do a system call from kernel instead of calling sys_execve so we
16819- * end up with proper pt_regs.
16820- */
16821-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16822-{
16823- long __res;
16824- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16825- : "=a" (__res)
16826- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16827- return __res;
16828-}
16829diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c
16830--- linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16831+++ linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16832@@ -32,8 +32,8 @@ out:
16833 return error;
16834 }
16835
16836-static void find_start_end(unsigned long flags, unsigned long *begin,
16837- unsigned long *end)
16838+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16839+ unsigned long *begin, unsigned long *end)
16840 {
16841 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16842 unsigned long new_begin;
16843@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16844 *begin = new_begin;
16845 }
16846 } else {
16847- *begin = TASK_UNMAPPED_BASE;
16848+ *begin = mm->mmap_base;
16849 *end = TASK_SIZE;
16850 }
16851 }
16852@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16853 if (flags & MAP_FIXED)
16854 return addr;
16855
16856- find_start_end(flags, &begin, &end);
16857+ find_start_end(mm, flags, &begin, &end);
16858
16859 if (len > end)
16860 return -ENOMEM;
16861
16862+#ifdef CONFIG_PAX_RANDMMAP
16863+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16864+#endif
16865+
16866 if (addr) {
16867 addr = PAGE_ALIGN(addr);
16868 vma = find_vma(mm, addr);
16869- if (end - len >= addr &&
16870- (!vma || addr + len <= vma->vm_start))
16871+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16872 return addr;
16873 }
16874 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16875@@ -106,7 +109,7 @@ full_search:
16876 }
16877 return -ENOMEM;
16878 }
16879- if (!vma || addr + len <= vma->vm_start) {
16880+ if (check_heap_stack_gap(vma, addr, len)) {
16881 /*
16882 * Remember the place where we stopped the search:
16883 */
16884@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16885 {
16886 struct vm_area_struct *vma;
16887 struct mm_struct *mm = current->mm;
16888- unsigned long addr = addr0;
16889+ unsigned long base = mm->mmap_base, addr = addr0;
16890
16891 /* requested length too big for entire address space */
16892 if (len > TASK_SIZE)
16893@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16894 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16895 goto bottomup;
16896
16897+#ifdef CONFIG_PAX_RANDMMAP
16898+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16899+#endif
16900+
16901 /* requesting a specific address */
16902 if (addr) {
16903 addr = PAGE_ALIGN(addr);
16904- vma = find_vma(mm, addr);
16905- if (TASK_SIZE - len >= addr &&
16906- (!vma || addr + len <= vma->vm_start))
16907- return addr;
16908+ if (TASK_SIZE - len >= addr) {
16909+ vma = find_vma(mm, addr);
16910+ if (check_heap_stack_gap(vma, addr, len))
16911+ return addr;
16912+ }
16913 }
16914
16915 /* check if free_area_cache is useful for us */
16916@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16917 /* make sure it can fit in the remaining address space */
16918 if (addr > len) {
16919 vma = find_vma(mm, addr-len);
16920- if (!vma || addr <= vma->vm_start)
16921+ if (check_heap_stack_gap(vma, addr - len, len))
16922 /* remember the address as a hint for next time */
16923 return mm->free_area_cache = addr-len;
16924 }
16925@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16926 * return with success:
16927 */
16928 vma = find_vma(mm, addr);
16929- if (!vma || addr+len <= vma->vm_start)
16930+ if (check_heap_stack_gap(vma, addr, len))
16931 /* remember the address as a hint for next time */
16932 return mm->free_area_cache = addr;
16933
16934@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16935 mm->cached_hole_size = vma->vm_start - addr;
16936
16937 /* try just below the current vma->vm_start */
16938- addr = vma->vm_start-len;
16939- } while (len < vma->vm_start);
16940+ addr = skip_heap_stack_gap(vma, len);
16941+ } while (!IS_ERR_VALUE(addr));
16942
16943 bottomup:
16944 /*
16945@@ -198,13 +206,21 @@ bottomup:
16946 * can happen with large stack limits and large mmap()
16947 * allocations.
16948 */
16949+ mm->mmap_base = TASK_UNMAPPED_BASE;
16950+
16951+#ifdef CONFIG_PAX_RANDMMAP
16952+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16953+ mm->mmap_base += mm->delta_mmap;
16954+#endif
16955+
16956+ mm->free_area_cache = mm->mmap_base;
16957 mm->cached_hole_size = ~0UL;
16958- mm->free_area_cache = TASK_UNMAPPED_BASE;
16959 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16960 /*
16961 * Restore the topdown base:
16962 */
16963- mm->free_area_cache = mm->mmap_base;
16964+ mm->mmap_base = base;
16965+ mm->free_area_cache = base;
16966 mm->cached_hole_size = ~0UL;
16967
16968 return addr;
16969diff -urNp linux-2.6.32.42/arch/x86/kernel/tboot.c linux-2.6.32.42/arch/x86/kernel/tboot.c
16970--- linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16971+++ linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16972@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16973
16974 void tboot_shutdown(u32 shutdown_type)
16975 {
16976- void (*shutdown)(void);
16977+ void (* __noreturn shutdown)(void);
16978
16979 if (!tboot_enabled())
16980 return;
16981@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16982
16983 switch_to_tboot_pt();
16984
16985- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16986+ shutdown = (void *)tboot->shutdown_entry;
16987 shutdown();
16988
16989 /* should not reach here */
16990@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16991 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16992 }
16993
16994-static atomic_t ap_wfs_count;
16995+static atomic_unchecked_t ap_wfs_count;
16996
16997 static int tboot_wait_for_aps(int num_aps)
16998 {
16999@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17000 {
17001 switch (action) {
17002 case CPU_DYING:
17003- atomic_inc(&ap_wfs_count);
17004+ atomic_inc_unchecked(&ap_wfs_count);
17005 if (num_online_cpus() == 1)
17006- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17007+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17008 return NOTIFY_BAD;
17009 break;
17010 }
17011@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17012
17013 tboot_create_trampoline();
17014
17015- atomic_set(&ap_wfs_count, 0);
17016+ atomic_set_unchecked(&ap_wfs_count, 0);
17017 register_hotcpu_notifier(&tboot_cpu_notifier);
17018 return 0;
17019 }
17020diff -urNp linux-2.6.32.42/arch/x86/kernel/time.c linux-2.6.32.42/arch/x86/kernel/time.c
17021--- linux-2.6.32.42/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17022+++ linux-2.6.32.42/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17023@@ -26,17 +26,13 @@
17024 int timer_ack;
17025 #endif
17026
17027-#ifdef CONFIG_X86_64
17028-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17029-#endif
17030-
17031 unsigned long profile_pc(struct pt_regs *regs)
17032 {
17033 unsigned long pc = instruction_pointer(regs);
17034
17035- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17036+ if (!user_mode(regs) && in_lock_functions(pc)) {
17037 #ifdef CONFIG_FRAME_POINTER
17038- return *(unsigned long *)(regs->bp + sizeof(long));
17039+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17040 #else
17041 unsigned long *sp =
17042 (unsigned long *)kernel_stack_pointer(regs);
17043@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17044 * or above a saved flags. Eflags has bits 22-31 zero,
17045 * kernel addresses don't.
17046 */
17047+
17048+#ifdef CONFIG_PAX_KERNEXEC
17049+ return ktla_ktva(sp[0]);
17050+#else
17051 if (sp[0] >> 22)
17052 return sp[0];
17053 if (sp[1] >> 22)
17054 return sp[1];
17055 #endif
17056+
17057+#endif
17058 }
17059 return pc;
17060 }
17061diff -urNp linux-2.6.32.42/arch/x86/kernel/tls.c linux-2.6.32.42/arch/x86/kernel/tls.c
17062--- linux-2.6.32.42/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17063+++ linux-2.6.32.42/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17064@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17065 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17066 return -EINVAL;
17067
17068+#ifdef CONFIG_PAX_SEGMEXEC
17069+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17070+ return -EINVAL;
17071+#endif
17072+
17073 set_tls_desc(p, idx, &info, 1);
17074
17075 return 0;
17076diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_32.S linux-2.6.32.42/arch/x86/kernel/trampoline_32.S
17077--- linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17078+++ linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17079@@ -32,6 +32,12 @@
17080 #include <asm/segment.h>
17081 #include <asm/page_types.h>
17082
17083+#ifdef CONFIG_PAX_KERNEXEC
17084+#define ta(X) (X)
17085+#else
17086+#define ta(X) ((X) - __PAGE_OFFSET)
17087+#endif
17088+
17089 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17090 __CPUINITRODATA
17091 .code16
17092@@ -60,7 +66,7 @@ r_base = .
17093 inc %ax # protected mode (PE) bit
17094 lmsw %ax # into protected mode
17095 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17096- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17097+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17098
17099 # These need to be in the same 64K segment as the above;
17100 # hence we don't use the boot_gdt_descr defined in head.S
17101diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_64.S linux-2.6.32.42/arch/x86/kernel/trampoline_64.S
17102--- linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17103+++ linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17104@@ -91,7 +91,7 @@ startup_32:
17105 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17106 movl %eax, %ds
17107
17108- movl $X86_CR4_PAE, %eax
17109+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17110 movl %eax, %cr4 # Enable PAE mode
17111
17112 # Setup trampoline 4 level pagetables
17113@@ -127,7 +127,7 @@ startup_64:
17114 no_longmode:
17115 hlt
17116 jmp no_longmode
17117-#include "verify_cpu_64.S"
17118+#include "verify_cpu.S"
17119
17120 # Careful these need to be in the same 64K segment as the above;
17121 tidt:
17122@@ -138,7 +138,7 @@ tidt:
17123 # so the kernel can live anywhere
17124 .balign 4
17125 tgdt:
17126- .short tgdt_end - tgdt # gdt limit
17127+ .short tgdt_end - tgdt - 1 # gdt limit
17128 .long tgdt - r_base
17129 .short 0
17130 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17131diff -urNp linux-2.6.32.42/arch/x86/kernel/traps.c linux-2.6.32.42/arch/x86/kernel/traps.c
17132--- linux-2.6.32.42/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17133+++ linux-2.6.32.42/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17134@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17135
17136 /* Do we ignore FPU interrupts ? */
17137 char ignore_fpu_irq;
17138-
17139-/*
17140- * The IDT has to be page-aligned to simplify the Pentium
17141- * F0 0F bug workaround.
17142- */
17143-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17144 #endif
17145
17146 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17147@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17148 static inline void
17149 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17150 {
17151- if (!user_mode_vm(regs))
17152+ if (!user_mode(regs))
17153 die(str, regs, err);
17154 }
17155 #endif
17156
17157 static void __kprobes
17158-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17159+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17160 long error_code, siginfo_t *info)
17161 {
17162 struct task_struct *tsk = current;
17163
17164 #ifdef CONFIG_X86_32
17165- if (regs->flags & X86_VM_MASK) {
17166+ if (v8086_mode(regs)) {
17167 /*
17168 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17169 * On nmi (interrupt 2), do_trap should not be called.
17170@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17171 }
17172 #endif
17173
17174- if (!user_mode(regs))
17175+ if (!user_mode_novm(regs))
17176 goto kernel_trap;
17177
17178 #ifdef CONFIG_X86_32
17179@@ -158,7 +152,7 @@ trap_signal:
17180 printk_ratelimit()) {
17181 printk(KERN_INFO
17182 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17183- tsk->comm, tsk->pid, str,
17184+ tsk->comm, task_pid_nr(tsk), str,
17185 regs->ip, regs->sp, error_code);
17186 print_vma_addr(" in ", regs->ip);
17187 printk("\n");
17188@@ -175,8 +169,20 @@ kernel_trap:
17189 if (!fixup_exception(regs)) {
17190 tsk->thread.error_code = error_code;
17191 tsk->thread.trap_no = trapnr;
17192+
17193+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17194+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17195+ str = "PAX: suspicious stack segment fault";
17196+#endif
17197+
17198 die(str, regs, error_code);
17199 }
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ if (trapnr == 4)
17203+ pax_report_refcount_overflow(regs);
17204+#endif
17205+
17206 return;
17207
17208 #ifdef CONFIG_X86_32
17209@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17210 conditional_sti(regs);
17211
17212 #ifdef CONFIG_X86_32
17213- if (regs->flags & X86_VM_MASK)
17214+ if (v8086_mode(regs))
17215 goto gp_in_vm86;
17216 #endif
17217
17218 tsk = current;
17219- if (!user_mode(regs))
17220+ if (!user_mode_novm(regs))
17221 goto gp_in_kernel;
17222
17223+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17224+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17225+ struct mm_struct *mm = tsk->mm;
17226+ unsigned long limit;
17227+
17228+ down_write(&mm->mmap_sem);
17229+ limit = mm->context.user_cs_limit;
17230+ if (limit < TASK_SIZE) {
17231+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17232+ up_write(&mm->mmap_sem);
17233+ return;
17234+ }
17235+ up_write(&mm->mmap_sem);
17236+ }
17237+#endif
17238+
17239 tsk->thread.error_code = error_code;
17240 tsk->thread.trap_no = 13;
17241
17242@@ -305,6 +327,13 @@ gp_in_kernel:
17243 if (notify_die(DIE_GPF, "general protection fault", regs,
17244 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17245 return;
17246+
17247+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17248+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17249+ die("PAX: suspicious general protection fault", regs, error_code);
17250+ else
17251+#endif
17252+
17253 die("general protection fault", regs, error_code);
17254 }
17255
17256@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17257 dotraplinkage notrace __kprobes void
17258 do_nmi(struct pt_regs *regs, long error_code)
17259 {
17260+
17261+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17262+ if (!user_mode(regs)) {
17263+ unsigned long cs = regs->cs & 0xFFFF;
17264+ unsigned long ip = ktva_ktla(regs->ip);
17265+
17266+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17267+ regs->ip = ip;
17268+ }
17269+#endif
17270+
17271 nmi_enter();
17272
17273 inc_irq_stat(__nmi_count);
17274@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17275 }
17276
17277 #ifdef CONFIG_X86_32
17278- if (regs->flags & X86_VM_MASK)
17279+ if (v8086_mode(regs))
17280 goto debug_vm86;
17281 #endif
17282
17283@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17284 * kernel space (but re-enable TF when returning to user mode).
17285 */
17286 if (condition & DR_STEP) {
17287- if (!user_mode(regs))
17288+ if (!user_mode_novm(regs))
17289 goto clear_TF_reenable;
17290 }
17291
17292@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17293 * Handle strange cache flush from user space exception
17294 * in all other cases. This is undocumented behaviour.
17295 */
17296- if (regs->flags & X86_VM_MASK) {
17297+ if (v8086_mode(regs)) {
17298 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17299 return;
17300 }
17301@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17302 void __math_state_restore(void)
17303 {
17304 struct thread_info *thread = current_thread_info();
17305- struct task_struct *tsk = thread->task;
17306+ struct task_struct *tsk = current;
17307
17308 /*
17309 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17310@@ -825,8 +865,7 @@ void __math_state_restore(void)
17311 */
17312 asmlinkage void math_state_restore(void)
17313 {
17314- struct thread_info *thread = current_thread_info();
17315- struct task_struct *tsk = thread->task;
17316+ struct task_struct *tsk = current;
17317
17318 if (!tsk_used_math(tsk)) {
17319 local_irq_enable();
17320diff -urNp linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S
17321--- linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17322+++ linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17323@@ -1,105 +0,0 @@
17324-/*
17325- *
17326- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17327- * code has been borrowed from boot/setup.S and was introduced by
17328- * Andi Kleen.
17329- *
17330- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17331- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17332- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17333- *
17334- * This source code is licensed under the GNU General Public License,
17335- * Version 2. See the file COPYING for more details.
17336- *
17337- * This is a common code for verification whether CPU supports
17338- * long mode and SSE or not. It is not called directly instead this
17339- * file is included at various places and compiled in that context.
17340- * Following are the current usage.
17341- *
17342- * This file is included by both 16bit and 32bit code.
17343- *
17344- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17345- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17346- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17347- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17348- *
17349- * verify_cpu, returns the status of cpu check in register %eax.
17350- * 0: Success 1: Failure
17351- *
17352- * The caller needs to check for the error code and take the action
17353- * appropriately. Either display a message or halt.
17354- */
17355-
17356-#include <asm/cpufeature.h>
17357-
17358-verify_cpu:
17359- pushfl # Save caller passed flags
17360- pushl $0 # Kill any dangerous flags
17361- popfl
17362-
17363- pushfl # standard way to check for cpuid
17364- popl %eax
17365- movl %eax,%ebx
17366- xorl $0x200000,%eax
17367- pushl %eax
17368- popfl
17369- pushfl
17370- popl %eax
17371- cmpl %eax,%ebx
17372- jz verify_cpu_no_longmode # cpu has no cpuid
17373-
17374- movl $0x0,%eax # See if cpuid 1 is implemented
17375- cpuid
17376- cmpl $0x1,%eax
17377- jb verify_cpu_no_longmode # no cpuid 1
17378-
17379- xor %di,%di
17380- cmpl $0x68747541,%ebx # AuthenticAMD
17381- jnz verify_cpu_noamd
17382- cmpl $0x69746e65,%edx
17383- jnz verify_cpu_noamd
17384- cmpl $0x444d4163,%ecx
17385- jnz verify_cpu_noamd
17386- mov $1,%di # cpu is from AMD
17387-
17388-verify_cpu_noamd:
17389- movl $0x1,%eax # Does the cpu have what it takes
17390- cpuid
17391- andl $REQUIRED_MASK0,%edx
17392- xorl $REQUIRED_MASK0,%edx
17393- jnz verify_cpu_no_longmode
17394-
17395- movl $0x80000000,%eax # See if extended cpuid is implemented
17396- cpuid
17397- cmpl $0x80000001,%eax
17398- jb verify_cpu_no_longmode # no extended cpuid
17399-
17400- movl $0x80000001,%eax # Does the cpu have what it takes
17401- cpuid
17402- andl $REQUIRED_MASK1,%edx
17403- xorl $REQUIRED_MASK1,%edx
17404- jnz verify_cpu_no_longmode
17405-
17406-verify_cpu_sse_test:
17407- movl $1,%eax
17408- cpuid
17409- andl $SSE_MASK,%edx
17410- cmpl $SSE_MASK,%edx
17411- je verify_cpu_sse_ok
17412- test %di,%di
17413- jz verify_cpu_no_longmode # only try to force SSE on AMD
17414- movl $0xc0010015,%ecx # HWCR
17415- rdmsr
17416- btr $15,%eax # enable SSE
17417- wrmsr
17418- xor %di,%di # don't loop
17419- jmp verify_cpu_sse_test # try again
17420-
17421-verify_cpu_no_longmode:
17422- popfl # Restore caller passed flags
17423- movl $1,%eax
17424- ret
17425-verify_cpu_sse_ok:
17426- popfl # Restore caller passed flags
17427- xorl %eax, %eax
17428- ret
17429diff -urNp linux-2.6.32.42/arch/x86/kernel/verify_cpu.S linux-2.6.32.42/arch/x86/kernel/verify_cpu.S
17430--- linux-2.6.32.42/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17431+++ linux-2.6.32.42/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17432@@ -0,0 +1,140 @@
17433+/*
17434+ *
17435+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17436+ * code has been borrowed from boot/setup.S and was introduced by
17437+ * Andi Kleen.
17438+ *
17439+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17440+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17441+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17442+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17443+ *
17444+ * This source code is licensed under the GNU General Public License,
17445+ * Version 2. See the file COPYING for more details.
17446+ *
17447+ * This is a common code for verification whether CPU supports
17448+ * long mode and SSE or not. It is not called directly instead this
17449+ * file is included at various places and compiled in that context.
17450+ * This file is expected to run in 32bit code. Currently:
17451+ *
17452+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17453+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17454+ * arch/x86/kernel/head_32.S: processor startup
17455+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17456+ *
17457+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17458+ * 0: Success 1: Failure
17459+ *
17460+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17461+ *
17462+ * The caller needs to check for the error code and take the action
17463+ * appropriately. Either display a message or halt.
17464+ */
17465+
17466+#include <asm/cpufeature.h>
17467+#include <asm/msr-index.h>
17468+
17469+verify_cpu:
17470+ pushfl # Save caller passed flags
17471+ pushl $0 # Kill any dangerous flags
17472+ popfl
17473+
17474+ pushfl # standard way to check for cpuid
17475+ popl %eax
17476+ movl %eax,%ebx
17477+ xorl $0x200000,%eax
17478+ pushl %eax
17479+ popfl
17480+ pushfl
17481+ popl %eax
17482+ cmpl %eax,%ebx
17483+ jz verify_cpu_no_longmode # cpu has no cpuid
17484+
17485+ movl $0x0,%eax # See if cpuid 1 is implemented
17486+ cpuid
17487+ cmpl $0x1,%eax
17488+ jb verify_cpu_no_longmode # no cpuid 1
17489+
17490+ xor %di,%di
17491+ cmpl $0x68747541,%ebx # AuthenticAMD
17492+ jnz verify_cpu_noamd
17493+ cmpl $0x69746e65,%edx
17494+ jnz verify_cpu_noamd
17495+ cmpl $0x444d4163,%ecx
17496+ jnz verify_cpu_noamd
17497+ mov $1,%di # cpu is from AMD
17498+ jmp verify_cpu_check
17499+
17500+verify_cpu_noamd:
17501+ cmpl $0x756e6547,%ebx # GenuineIntel?
17502+ jnz verify_cpu_check
17503+ cmpl $0x49656e69,%edx
17504+ jnz verify_cpu_check
17505+ cmpl $0x6c65746e,%ecx
17506+ jnz verify_cpu_check
17507+
17508+ # only call IA32_MISC_ENABLE when:
17509+ # family > 6 || (family == 6 && model >= 0xd)
17510+ movl $0x1, %eax # check CPU family and model
17511+ cpuid
17512+ movl %eax, %ecx
17513+
17514+ andl $0x0ff00f00, %eax # mask family and extended family
17515+ shrl $8, %eax
17516+ cmpl $6, %eax
17517+ ja verify_cpu_clear_xd # family > 6, ok
17518+ jb verify_cpu_check # family < 6, skip
17519+
17520+ andl $0x000f00f0, %ecx # mask model and extended model
17521+ shrl $4, %ecx
17522+ cmpl $0xd, %ecx
17523+ jb verify_cpu_check # family == 6, model < 0xd, skip
17524+
17525+verify_cpu_clear_xd:
17526+ movl $MSR_IA32_MISC_ENABLE, %ecx
17527+ rdmsr
17528+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17529+ jnc verify_cpu_check # only write MSR if bit was changed
17530+ wrmsr
17531+
17532+verify_cpu_check:
17533+ movl $0x1,%eax # Does the cpu have what it takes
17534+ cpuid
17535+ andl $REQUIRED_MASK0,%edx
17536+ xorl $REQUIRED_MASK0,%edx
17537+ jnz verify_cpu_no_longmode
17538+
17539+ movl $0x80000000,%eax # See if extended cpuid is implemented
17540+ cpuid
17541+ cmpl $0x80000001,%eax
17542+ jb verify_cpu_no_longmode # no extended cpuid
17543+
17544+ movl $0x80000001,%eax # Does the cpu have what it takes
17545+ cpuid
17546+ andl $REQUIRED_MASK1,%edx
17547+ xorl $REQUIRED_MASK1,%edx
17548+ jnz verify_cpu_no_longmode
17549+
17550+verify_cpu_sse_test:
17551+ movl $1,%eax
17552+ cpuid
17553+ andl $SSE_MASK,%edx
17554+ cmpl $SSE_MASK,%edx
17555+ je verify_cpu_sse_ok
17556+ test %di,%di
17557+ jz verify_cpu_no_longmode # only try to force SSE on AMD
17558+ movl $MSR_K7_HWCR,%ecx
17559+ rdmsr
17560+ btr $15,%eax # enable SSE
17561+ wrmsr
17562+ xor %di,%di # don't loop
17563+ jmp verify_cpu_sse_test # try again
17564+
17565+verify_cpu_no_longmode:
17566+ popfl # Restore caller passed flags
17567+ movl $1,%eax
17568+ ret
17569+verify_cpu_sse_ok:
17570+ popfl # Restore caller passed flags
17571+ xorl %eax, %eax
17572+ ret
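For readers not fluent in the assembly above, the family/model gate placed in front of the MSR_IA32_MISC_ENABLE write can be read in C roughly as the standalone sketch below. It is illustrative only: should_clear_xd_disable() is a made-up name, and its input is the raw EAX value returned by CPUID leaf 1, masked exactly the way the assembly masks it.

    /* Illustrative C rendering of the gate above; not part of the patch. */
    #include <stdio.h>

    static int should_clear_xd_disable(unsigned int cpuid1_eax)
    {
        /* family + extended family: bits 11:8 and 27:20, shifted down by 8 */
        unsigned int fam = (cpuid1_eax & 0x0ff00f00u) >> 8;
        /* model + extended model: bits 7:4 and 19:16, shifted down by 4 */
        unsigned int mod = (cpuid1_eax & 0x000f00f0u) >> 4;

        if (fam > 6)
            return 1;          /* family > 6: always clear XD_DISABLE */
        if (fam < 6)
            return 0;          /* family < 6: skip the MSR entirely */
        return mod >= 0xd;     /* family == 6: need model >= 0xd */
    }

    int main(void)
    {
        /* 0x000106e5 is a plausible leaf-1 EAX for a family-6 part that is
         * new enough to pass the gate; expect "1" to be printed. */
        printf("%d\n", should_clear_xd_disable(0x000106e5u));
        return 0;
    }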
17573diff -urNp linux-2.6.32.42/arch/x86/kernel/vm86_32.c linux-2.6.32.42/arch/x86/kernel/vm86_32.c
17574--- linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17575+++ linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17576@@ -41,6 +41,7 @@
17577 #include <linux/ptrace.h>
17578 #include <linux/audit.h>
17579 #include <linux/stddef.h>
17580+#include <linux/grsecurity.h>
17581
17582 #include <asm/uaccess.h>
17583 #include <asm/io.h>
17584@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17585 do_exit(SIGSEGV);
17586 }
17587
17588- tss = &per_cpu(init_tss, get_cpu());
17589+ tss = init_tss + get_cpu();
17590 current->thread.sp0 = current->thread.saved_sp0;
17591 current->thread.sysenter_cs = __KERNEL_CS;
17592 load_sp0(tss, &current->thread);
17593@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17594 struct task_struct *tsk;
17595 int tmp, ret = -EPERM;
17596
17597+#ifdef CONFIG_GRKERNSEC_VM86
17598+ if (!capable(CAP_SYS_RAWIO)) {
17599+ gr_handle_vm86();
17600+ goto out;
17601+ }
17602+#endif
17603+
17604 tsk = current;
17605 if (tsk->thread.saved_sp0)
17606 goto out;
17607@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17608 int tmp, ret;
17609 struct vm86plus_struct __user *v86;
17610
17611+#ifdef CONFIG_GRKERNSEC_VM86
17612+ if (!capable(CAP_SYS_RAWIO)) {
17613+ gr_handle_vm86();
17614+ ret = -EPERM;
17615+ goto out;
17616+ }
17617+#endif
17618+
17619 tsk = current;
17620 switch (regs->bx) {
17621 case VM86_REQUEST_IRQ:
17622@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17623 tsk->thread.saved_fs = info->regs32->fs;
17624 tsk->thread.saved_gs = get_user_gs(info->regs32);
17625
17626- tss = &per_cpu(init_tss, get_cpu());
17627+ tss = init_tss + get_cpu();
17628 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17629 if (cpu_has_sep)
17630 tsk->thread.sysenter_cs = 0;
17631@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17632 goto cannot_handle;
17633 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17634 goto cannot_handle;
17635- intr_ptr = (unsigned long __user *) (i << 2);
17636+ intr_ptr = (__force unsigned long __user *) (i << 2);
17637 if (get_user(segoffs, intr_ptr))
17638 goto cannot_handle;
17639 if ((segoffs >> 16) == BIOSSEG)
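The two sys_vm86 hunks above follow one pattern: unless the caller holds CAP_SYS_RAWIO, the legacy vm86 entry points are refused with -EPERM (and logged through gr_handle_vm86()). A minimal userspace sketch of that control flow, with caller_has_rawio as a made-up stand-in for the kernel's capable() check:

    #include <stdio.h>
    #include <errno.h>

    static int caller_has_rawio;   /* stand-in for capable(CAP_SYS_RAWIO) */

    static int vm86_entry(void)
    {
        if (!caller_has_rawio) {
            /* the real patch also logs the refusal via gr_handle_vm86() */
            return -EPERM;
        }
        return 0;   /* fall through to the normal vm86 setup */
    }

    int main(void)
    {
        printf("unprivileged: %d\n", vm86_entry());   /* -EPERM (-1) */
        caller_has_rawio = 1;
        printf("privileged:   %d\n", vm86_entry());   /* 0 */
        return 0;
    }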
17640diff -urNp linux-2.6.32.42/arch/x86/kernel/vmi_32.c linux-2.6.32.42/arch/x86/kernel/vmi_32.c
17641--- linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17642+++ linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17643@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17644 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17645
17646 #define call_vrom_func(rom,func) \
17647- (((VROMFUNC *)(rom->func))())
17648+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
17649
17650 #define call_vrom_long_func(rom,func,arg) \
17651- (((VROMLONGFUNC *)(rom->func)) (arg))
17652+({\
17653+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17654+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17655+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17656+ __reloc;\
17657+})
17658
17659-static struct vrom_header *vmi_rom;
17660+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17661 static int disable_pge;
17662 static int disable_pse;
17663 static int disable_sep;
17664@@ -76,10 +81,10 @@ static struct {
17665 void (*set_initial_ap_state)(int, int);
17666 void (*halt)(void);
17667 void (*set_lazy_mode)(int mode);
17668-} vmi_ops;
17669+} vmi_ops __read_only;
17670
17671 /* Cached VMI operations */
17672-struct vmi_timer_ops vmi_timer_ops;
17673+struct vmi_timer_ops vmi_timer_ops __read_only;
17674
17675 /*
17676 * VMI patching routines.
17677@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17678 static inline void patch_offset(void *insnbuf,
17679 unsigned long ip, unsigned long dest)
17680 {
17681- *(unsigned long *)(insnbuf+1) = dest-ip-5;
17682+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
17683 }
17684
17685 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17686@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17687 {
17688 u64 reloc;
17689 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17690+
17691 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17692 switch(rel->type) {
17693 case VMI_RELOCATION_CALL_REL:
17694@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17695
17696 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17697 {
17698- const pte_t pte = { .pte = 0 };
17699+ const pte_t pte = __pte(0ULL);
17700 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17701 }
17702
17703 static void vmi_pmd_clear(pmd_t *pmd)
17704 {
17705- const pte_t pte = { .pte = 0 };
17706+ const pte_t pte = __pte(0ULL);
17707 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17708 }
17709 #endif
17710@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17711 ap.ss = __KERNEL_DS;
17712 ap.esp = (unsigned long) start_esp;
17713
17714- ap.ds = __USER_DS;
17715- ap.es = __USER_DS;
17716+ ap.ds = __KERNEL_DS;
17717+ ap.es = __KERNEL_DS;
17718 ap.fs = __KERNEL_PERCPU;
17719- ap.gs = __KERNEL_STACK_CANARY;
17720+ savesegment(gs, ap.gs);
17721
17722 ap.eflags = 0;
17723
17724@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17725 paravirt_leave_lazy_mmu();
17726 }
17727
17728+#ifdef CONFIG_PAX_KERNEXEC
17729+static unsigned long vmi_pax_open_kernel(void)
17730+{
17731+ return 0;
17732+}
17733+
17734+static unsigned long vmi_pax_close_kernel(void)
17735+{
17736+ return 0;
17737+}
17738+#endif
17739+
17740 static inline int __init check_vmi_rom(struct vrom_header *rom)
17741 {
17742 struct pci_header *pci;
17743@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17744 return 0;
17745 if (rom->vrom_signature != VMI_SIGNATURE)
17746 return 0;
17747+ if (rom->rom_length * 512 > sizeof(*rom)) {
17748+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17749+ return 0;
17750+ }
17751 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17752 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17753 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17754@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17755 struct vrom_header *romstart;
17756 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17757 if (check_vmi_rom(romstart)) {
17758- vmi_rom = romstart;
17759+ vmi_rom = *romstart;
17760 return 1;
17761 }
17762 }
17763@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17764
17765 para_fill(pv_irq_ops.safe_halt, Halt);
17766
17767+#ifdef CONFIG_PAX_KERNEXEC
17768+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17769+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17770+#endif
17771+
17772 /*
17773 * Alternative instruction rewriting doesn't happen soon enough
17774 * to convert VMI_IRET to a call instead of a jump; so we have
17775@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17776
17777 void __init vmi_init(void)
17778 {
17779- if (!vmi_rom)
17780+ if (!vmi_rom.rom_signature)
17781 probe_vmi_rom();
17782 else
17783- check_vmi_rom(vmi_rom);
17784+ check_vmi_rom(&vmi_rom);
17785
17786 	/* In case probing for or validating the ROM failed, bail */
17787- if (!vmi_rom)
17788+ if (!vmi_rom.rom_signature)
17789 return;
17790
17791- reserve_top_address(-vmi_rom->virtual_top);
17792+ reserve_top_address(-vmi_rom.virtual_top);
17793
17794 #ifdef CONFIG_X86_IO_APIC
17795 /* This is virtual hardware; timer routing is wired correctly */
17796@@ -874,7 +901,7 @@ void __init vmi_activate(void)
17797 {
17798 unsigned long flags;
17799
17800- if (!vmi_rom)
17801+ if (!vmi_rom.rom_signature)
17802 return;
17803
17804 local_irq_save(flags);
17805diff -urNp linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S
17806--- linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17807+++ linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17808@@ -26,6 +26,13 @@
17809 #include <asm/page_types.h>
17810 #include <asm/cache.h>
17811 #include <asm/boot.h>
17812+#include <asm/segment.h>
17813+
17814+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17815+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17816+#else
17817+#define __KERNEL_TEXT_OFFSET 0
17818+#endif
17819
17820 #undef i386 /* in case the preprocessor is a 32bit one */
17821
17822@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17823 #ifdef CONFIG_X86_32
17824 OUTPUT_ARCH(i386)
17825 ENTRY(phys_startup_32)
17826-jiffies = jiffies_64;
17827 #else
17828 OUTPUT_ARCH(i386:x86-64)
17829 ENTRY(phys_startup_64)
17830-jiffies_64 = jiffies;
17831 #endif
17832
17833 PHDRS {
17834 text PT_LOAD FLAGS(5); /* R_E */
17835- data PT_LOAD FLAGS(7); /* RWE */
17836+#ifdef CONFIG_X86_32
17837+ module PT_LOAD FLAGS(5); /* R_E */
17838+#endif
17839+#ifdef CONFIG_XEN
17840+ rodata PT_LOAD FLAGS(5); /* R_E */
17841+#else
17842+ rodata PT_LOAD FLAGS(4); /* R__ */
17843+#endif
17844+ data PT_LOAD FLAGS(6); /* RW_ */
17845 #ifdef CONFIG_X86_64
17846 user PT_LOAD FLAGS(5); /* R_E */
17847+#endif
17848+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17849 #ifdef CONFIG_SMP
17850 percpu PT_LOAD FLAGS(6); /* RW_ */
17851 #endif
17852+ text.init PT_LOAD FLAGS(5); /* R_E */
17853+ text.exit PT_LOAD FLAGS(5); /* R_E */
17854 init PT_LOAD FLAGS(7); /* RWE */
17855-#endif
17856 note PT_NOTE FLAGS(0); /* ___ */
17857 }
17858
17859 SECTIONS
17860 {
17861 #ifdef CONFIG_X86_32
17862- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17863- phys_startup_32 = startup_32 - LOAD_OFFSET;
17864+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17865 #else
17866- . = __START_KERNEL;
17867- phys_startup_64 = startup_64 - LOAD_OFFSET;
17868+ . = __START_KERNEL;
17869 #endif
17870
17871 /* Text and read-only data */
17872- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17873- _text = .;
17874+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17875 /* bootstrapping code */
17876+#ifdef CONFIG_X86_32
17877+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17878+#else
17879+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17880+#endif
17881+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17882+ _text = .;
17883 HEAD_TEXT
17884 #ifdef CONFIG_X86_32
17885 . = ALIGN(PAGE_SIZE);
17886@@ -82,28 +102,71 @@ SECTIONS
17887 IRQENTRY_TEXT
17888 *(.fixup)
17889 *(.gnu.warning)
17890- /* End of text section */
17891- _etext = .;
17892 } :text = 0x9090
17893
17894- NOTES :text :note
17895+ . += __KERNEL_TEXT_OFFSET;
17896+
17897+#ifdef CONFIG_X86_32
17898+ . = ALIGN(PAGE_SIZE);
17899+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17900+ *(.vmi.rom)
17901+ } :module
17902+
17903+ . = ALIGN(PAGE_SIZE);
17904+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17905+
17906+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17907+ MODULES_EXEC_VADDR = .;
17908+ BYTE(0)
17909+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17910+ . = ALIGN(HPAGE_SIZE);
17911+ MODULES_EXEC_END = . - 1;
17912+#endif
17913+
17914+ } :module
17915+#endif
17916
17917- EXCEPTION_TABLE(16) :text = 0x9090
17918+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17919+ /* End of text section */
17920+ _etext = . - __KERNEL_TEXT_OFFSET;
17921+ }
17922+
17923+#ifdef CONFIG_X86_32
17924+ . = ALIGN(PAGE_SIZE);
17925+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17926+ *(.idt)
17927+ . = ALIGN(PAGE_SIZE);
17928+ *(.empty_zero_page)
17929+ *(.swapper_pg_fixmap)
17930+ *(.swapper_pg_pmd)
17931+ *(.swapper_pg_dir)
17932+ *(.trampoline_pg_dir)
17933+ } :rodata
17934+#endif
17935+
17936+ . = ALIGN(PAGE_SIZE);
17937+ NOTES :rodata :note
17938+
17939+ EXCEPTION_TABLE(16) :rodata
17940
17941 RO_DATA(PAGE_SIZE)
17942
17943 /* Data */
17944 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17945+
17946+#ifdef CONFIG_PAX_KERNEXEC
17947+ . = ALIGN(HPAGE_SIZE);
17948+#else
17949+ . = ALIGN(PAGE_SIZE);
17950+#endif
17951+
17952 /* Start of data section */
17953 _sdata = .;
17954
17955 /* init_task */
17956 INIT_TASK_DATA(THREAD_SIZE)
17957
17958-#ifdef CONFIG_X86_32
17959- /* 32 bit has nosave before _edata */
17960 NOSAVE_DATA
17961-#endif
17962
17963 PAGE_ALIGNED_DATA(PAGE_SIZE)
17964
17965@@ -112,6 +175,8 @@ SECTIONS
17966 DATA_DATA
17967 CONSTRUCTORS
17968
17969+ jiffies = jiffies_64;
17970+
17971 /* rarely changed data like cpu maps */
17972 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17973
17974@@ -166,12 +231,6 @@ SECTIONS
17975 }
17976 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17977
17978- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17979- .jiffies : AT(VLOAD(.jiffies)) {
17980- *(.jiffies)
17981- }
17982- jiffies = VVIRT(.jiffies);
17983-
17984 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17985 *(.vsyscall_3)
17986 }
17987@@ -187,12 +246,19 @@ SECTIONS
17988 #endif /* CONFIG_X86_64 */
17989
17990 /* Init code and data - will be freed after init */
17991- . = ALIGN(PAGE_SIZE);
17992 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17993+ BYTE(0)
17994+
17995+#ifdef CONFIG_PAX_KERNEXEC
17996+ . = ALIGN(HPAGE_SIZE);
17997+#else
17998+ . = ALIGN(PAGE_SIZE);
17999+#endif
18000+
18001 __init_begin = .; /* paired with __init_end */
18002- }
18003+ } :init.begin
18004
18005-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18006+#ifdef CONFIG_SMP
18007 /*
18008 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18009 * output PHDR, so the next output section - .init.text - should
18010@@ -201,12 +267,27 @@ SECTIONS
18011 PERCPU_VADDR(0, :percpu)
18012 #endif
18013
18014- INIT_TEXT_SECTION(PAGE_SIZE)
18015-#ifdef CONFIG_X86_64
18016- :init
18017-#endif
18018+ . = ALIGN(PAGE_SIZE);
18019+ init_begin = .;
18020+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18021+ VMLINUX_SYMBOL(_sinittext) = .;
18022+ INIT_TEXT
18023+ VMLINUX_SYMBOL(_einittext) = .;
18024+ . = ALIGN(PAGE_SIZE);
18025+ } :text.init
18026
18027- INIT_DATA_SECTION(16)
18028+ /*
18029+	 * .exit.text is discarded at runtime, not link time, to deal with
18030+ * references from .altinstructions and .eh_frame
18031+ */
18032+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18033+ EXIT_TEXT
18034+ . = ALIGN(16);
18035+ } :text.exit
18036+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18037+
18038+ . = ALIGN(PAGE_SIZE);
18039+ INIT_DATA_SECTION(16) :init
18040
18041 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18042 __x86_cpu_dev_start = .;
18043@@ -232,19 +313,11 @@ SECTIONS
18044 *(.altinstr_replacement)
18045 }
18046
18047- /*
18048- * .exit.text is discard at runtime, not link time, to deal with
18049- * references from .altinstructions and .eh_frame
18050- */
18051- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18052- EXIT_TEXT
18053- }
18054-
18055 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18056 EXIT_DATA
18057 }
18058
18059-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18060+#ifndef CONFIG_SMP
18061 PERCPU(PAGE_SIZE)
18062 #endif
18063
18064@@ -267,12 +340,6 @@ SECTIONS
18065 . = ALIGN(PAGE_SIZE);
18066 }
18067
18068-#ifdef CONFIG_X86_64
18069- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18070- NOSAVE_DATA
18071- }
18072-#endif
18073-
18074 /* BSS */
18075 . = ALIGN(PAGE_SIZE);
18076 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18077@@ -288,6 +355,7 @@ SECTIONS
18078 __brk_base = .;
18079 . += 64 * 1024; /* 64k alignment slop space */
18080 *(.brk_reservation) /* areas brk users have reserved */
18081+ . = ALIGN(HPAGE_SIZE);
18082 __brk_limit = .;
18083 }
18084
18085@@ -316,13 +384,12 @@ SECTIONS
18086 * for the boot processor.
18087 */
18088 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18089-INIT_PER_CPU(gdt_page);
18090 INIT_PER_CPU(irq_stack_union);
18091
18092 /*
18093 * Build-time check on the image size:
18094 */
18095-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18096+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18097 "kernel image bigger than KERNEL_IMAGE_SIZE");
18098
18099 #ifdef CONFIG_SMP
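The PHDRS block rewritten above encodes segment permissions as raw ELF p_flags values, which is why the data segment drops from FLAGS(7) to FLAGS(6). The bits are PF_X = 1, PF_W = 2, PF_R = 4; a small decoder, for illustration only (the function names are made up):

    #include <stdio.h>

    #define PF_X 0x1
    #define PF_W 0x2
    #define PF_R 0x4

    static void decode(const char *seg, unsigned int flags)
    {
        printf("%-8s FLAGS(%u) = %c%c%c\n", seg, flags,
               (flags & PF_R) ? 'R' : '_',
               (flags & PF_W) ? 'W' : '_',
               (flags & PF_X) ? 'E' : '_');
    }

    int main(void)
    {
        decode("text",   5);   /* R_E */
        decode("rodata", 4);   /* R__ */
        decode("data",   6);   /* RW_ */
        decode("init",   7);   /* RWE */
        return 0;
    }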
18100diff -urNp linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c
18101--- linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18102+++ linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18103@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18104
18105 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18106 /* copy vsyscall data */
18107+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18108 vsyscall_gtod_data.clock.vread = clock->vread;
18109 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18110 vsyscall_gtod_data.clock.mask = clock->mask;
18111@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18112 We do this here because otherwise user space would do it on
18113 its own in a likely inferior way (no access to jiffies).
18114 If you don't like it pass NULL. */
18115- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18116+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18117 p = tcache->blob[1];
18118 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18119 /* Load per CPU data from RDTSCP */
18120diff -urNp linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c
18121--- linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18122+++ linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18123@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18124
18125 EXPORT_SYMBOL(copy_user_generic);
18126 EXPORT_SYMBOL(__copy_user_nocache);
18127-EXPORT_SYMBOL(copy_from_user);
18128-EXPORT_SYMBOL(copy_to_user);
18129 EXPORT_SYMBOL(__copy_from_user_inatomic);
18130
18131 EXPORT_SYMBOL(copy_page);
18132diff -urNp linux-2.6.32.42/arch/x86/kernel/xsave.c linux-2.6.32.42/arch/x86/kernel/xsave.c
18133--- linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18134+++ linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18135@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18136 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18137 return -1;
18138
18139- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18140+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18141 fx_sw_user->extended_size -
18142 FP_XSTATE_MAGIC2_SIZE));
18143 /*
18144@@ -196,7 +196,7 @@ fx_only:
18145 * the other extended state.
18146 */
18147 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18148- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18149+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18150 }
18151
18152 /*
18153@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18154 if (task_thread_info(tsk)->status & TS_XSAVE)
18155 err = restore_user_xstate(buf);
18156 else
18157- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18158+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18159 buf);
18160 if (unlikely(err)) {
18161 /*
18162diff -urNp linux-2.6.32.42/arch/x86/kvm/emulate.c linux-2.6.32.42/arch/x86/kvm/emulate.c
18163--- linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18164+++ linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18165@@ -81,8 +81,8 @@
18166 #define Src2CL (1<<29)
18167 #define Src2ImmByte (2<<29)
18168 #define Src2One (3<<29)
18169-#define Src2Imm16 (4<<29)
18170-#define Src2Mask (7<<29)
18171+#define Src2Imm16 (4U<<29)
18172+#define Src2Mask (7U<<29)
18173
18174 enum {
18175 Group1_80, Group1_81, Group1_82, Group1_83,
18176@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18177
18178 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18179 do { \
18180+ unsigned long _tmp; \
18181 __asm__ __volatile__ ( \
18182 _PRE_EFLAGS("0", "4", "2") \
18183 _op _suffix " %"_x"3,%1; " \
18184@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18185 /* Raw emulation: instruction has two explicit operands. */
18186 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18187 do { \
18188- unsigned long _tmp; \
18189- \
18190 switch ((_dst).bytes) { \
18191 case 2: \
18192 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18193@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18194
18195 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18196 do { \
18197- unsigned long _tmp; \
18198 switch ((_dst).bytes) { \
18199 case 1: \
18200 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
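The Src2Imm16/Src2Mask change above exists because 7 << 29 shifts a set bit into the sign bit of a 32-bit int, which is undefined behaviour in C and in practice yields a negative constant; writing 7U << 29 keeps the mask unsigned. A standalone illustration (variable names are made up, and the "as int" value assumes the usual two's-complement ABI):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mask = 7U << 29;   /* well defined: 0xe0000000 */
        int wrapped = (int)mask;        /* what 7 << 29 ends up as on the
                                           usual two's-complement ABIs */

        printf("7U << 29 = 0x%08x\n", mask);
        printf("as int   = %d\n", wrapped);
        return 0;
    }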
18201diff -urNp linux-2.6.32.42/arch/x86/kvm/lapic.c linux-2.6.32.42/arch/x86/kvm/lapic.c
18202--- linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18203+++ linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18204@@ -52,7 +52,7 @@
18205 #define APIC_BUS_CYCLE_NS 1
18206
18207 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18208-#define apic_debug(fmt, arg...)
18209+#define apic_debug(fmt, arg...) do {} while (0)
18210
18211 #define APIC_LVT_NUM 6
18212 /* 14 is the version for Xeon and Pentium 8.4.8*/
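The apic_debug() change above swaps an empty expansion for the do {} while (0) idiom, so a compiled-out debug call still behaves as one real statement instead of leaving a bare ';' as the body of an if (which -Wempty-body flags). A minimal illustration with made-up macro names:

    #include <stdio.h>

    #define dbg_empty(fmt, ...)                 /* expands to nothing       */
    #define dbg_stmt(fmt, ...) do {} while (0)  /* expands to a no-op block */

    int main(void)
    {
        int irq = 3;

        if (irq > 2)
            dbg_stmt("irq %d\n", irq);   /* fine: a real empty statement */

        if (irq > 2)
            dbg_empty("irq %d\n", irq);  /* becomes "if (irq > 2) ;" and
                                            draws an empty-body warning */

        printf("irq = %d\n", irq);
        return 0;
    }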
18213diff -urNp linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h
18214--- linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18215+++ linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18216@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18217 int level = PT_PAGE_TABLE_LEVEL;
18218 unsigned long mmu_seq;
18219
18220+ pax_track_stack();
18221+
18222 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18223 kvm_mmu_audit(vcpu, "pre page fault");
18224
18225diff -urNp linux-2.6.32.42/arch/x86/kvm/svm.c linux-2.6.32.42/arch/x86/kvm/svm.c
18226--- linux-2.6.32.42/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18227+++ linux-2.6.32.42/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
18228@@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
18229 static void reload_tss(struct kvm_vcpu *vcpu)
18230 {
18231 int cpu = raw_smp_processor_id();
18232-
18233 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18234+
18235+ pax_open_kernel();
18236 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18237+ pax_close_kernel();
18238+
18239 load_TR_desc();
18240 }
18241
18242@@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
18243 return true;
18244 }
18245
18246-static struct kvm_x86_ops svm_x86_ops = {
18247+static const struct kvm_x86_ops svm_x86_ops = {
18248 .cpu_has_kvm_support = has_svm,
18249 .disabled_by_bios = is_disabled,
18250 .hardware_setup = svm_hardware_setup,
18251diff -urNp linux-2.6.32.42/arch/x86/kvm/vmx.c linux-2.6.32.42/arch/x86/kvm/vmx.c
18252--- linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18253+++ linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18254@@ -570,7 +570,11 @@ static void reload_tss(void)
18255
18256 kvm_get_gdt(&gdt);
18257 descs = (void *)gdt.base;
18258+
18259+ pax_open_kernel();
18260 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18261+ pax_close_kernel();
18262+
18263 load_TR_desc();
18264 }
18265
18266@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18267 if (!cpu_has_vmx_flexpriority())
18268 flexpriority_enabled = 0;
18269
18270- if (!cpu_has_vmx_tpr_shadow())
18271- kvm_x86_ops->update_cr8_intercept = NULL;
18272+ if (!cpu_has_vmx_tpr_shadow()) {
18273+ pax_open_kernel();
18274+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18275+ pax_close_kernel();
18276+ }
18277
18278 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18279 kvm_disable_largepages();
18280@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18281 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18282
18283 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18284- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18285+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18286 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18287 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18288 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18289@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18290 "jmp .Lkvm_vmx_return \n\t"
18291 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18292 ".Lkvm_vmx_return: "
18293+
18294+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18295+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18296+ ".Lkvm_vmx_return2: "
18297+#endif
18298+
18299 /* Save guest registers, load host registers, keep flags */
18300 "xchg %0, (%%"R"sp) \n\t"
18301 "mov %%"R"ax, %c[rax](%0) \n\t"
18302@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18303 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18304 #endif
18305 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18306+
18307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18308+ ,[cs]"i"(__KERNEL_CS)
18309+#endif
18310+
18311 : "cc", "memory"
18312- , R"bx", R"di", R"si"
18313+ , R"ax", R"bx", R"di", R"si"
18314 #ifdef CONFIG_X86_64
18315 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18316 #endif
18317@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18318 if (vmx->rmode.irq.pending)
18319 fixup_rmode_irq(vmx);
18320
18321- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18322+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18323+
18324+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18325+ loadsegment(fs, __KERNEL_PERCPU);
18326+#endif
18327+
18328+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18329+ __set_fs(current_thread_info()->addr_limit);
18330+#endif
18331+
18332 vmx->launched = 1;
18333
18334 vmx_complete_interrupts(vmx);
18335@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18336 return false;
18337 }
18338
18339-static struct kvm_x86_ops vmx_x86_ops = {
18340+static const struct kvm_x86_ops vmx_x86_ops = {
18341 .cpu_has_kvm_support = cpu_has_kvm_support,
18342 .disabled_by_bios = vmx_disabled_by_bios,
18343 .hardware_setup = hardware_setup,
18344diff -urNp linux-2.6.32.42/arch/x86/kvm/x86.c linux-2.6.32.42/arch/x86/kvm/x86.c
18345--- linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18346+++ linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18347@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18348 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18349 struct kvm_cpuid_entry2 __user *entries);
18350
18351-struct kvm_x86_ops *kvm_x86_ops;
18352+const struct kvm_x86_ops *kvm_x86_ops;
18353 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18354
18355 int ignore_msrs = 0;
18356@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18357 struct kvm_cpuid2 *cpuid,
18358 struct kvm_cpuid_entry2 __user *entries)
18359 {
18360- int r;
18361+ int r, i;
18362
18363 r = -E2BIG;
18364 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18365 goto out;
18366 r = -EFAULT;
18367- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18368- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18369+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18370 goto out;
18371+ for (i = 0; i < cpuid->nent; ++i) {
18372+ struct kvm_cpuid_entry2 cpuid_entry;
18373+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18374+ goto out;
18375+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18376+ }
18377 vcpu->arch.cpuid_nent = cpuid->nent;
18378 kvm_apic_set_version(vcpu);
18379 return 0;
18380@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18381 struct kvm_cpuid2 *cpuid,
18382 struct kvm_cpuid_entry2 __user *entries)
18383 {
18384- int r;
18385+ int r, i;
18386
18387 vcpu_load(vcpu);
18388 r = -E2BIG;
18389 if (cpuid->nent < vcpu->arch.cpuid_nent)
18390 goto out;
18391 r = -EFAULT;
18392- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18393- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18394+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18395 goto out;
18396+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18397+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18398+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18399+ goto out;
18400+ }
18401 return 0;
18402
18403 out:
18404@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18405 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18406 struct kvm_interrupt *irq)
18407 {
18408- if (irq->irq < 0 || irq->irq >= 256)
18409+ if (irq->irq >= 256)
18410 return -EINVAL;
18411 if (irqchip_in_kernel(vcpu->kvm))
18412 return -ENXIO;
18413@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18414 .notifier_call = kvmclock_cpufreq_notifier
18415 };
18416
18417-int kvm_arch_init(void *opaque)
18418+int kvm_arch_init(const void *opaque)
18419 {
18420 int r, cpu;
18421- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18422+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18423
18424 if (kvm_x86_ops) {
18425 printk(KERN_ERR "kvm: already loaded the other module\n");
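The kvm_vcpu_ioctl_interrupt hunk above drops the "irq->irq < 0" half of the range check because the field is unsigned, so that test can never be true; any "negative" value from userspace simply shows up as a large number and is caught by the upper bound. A standalone sketch (fake_interrupt and validate() are made-up stand-ins):

    #include <stdio.h>

    struct fake_interrupt { unsigned int irq; };  /* stand-in for the real struct */

    static int validate(const struct fake_interrupt *i)
    {
        /* an "i->irq < 0" test would be compiled away (and warned about)
         * for an unsigned field, so only the upper bound is meaningful */
        if (i->irq >= 256)
            return -1;   /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        struct fake_interrupt a = { .irq = 17 };
        struct fake_interrupt b = { .irq = (unsigned int)-5 };  /* "negative" input */

        printf("%d %d\n", validate(&a), validate(&b));   /* prints: 0 -1 */
        return 0;
    }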
18426diff -urNp linux-2.6.32.42/arch/x86/lib/atomic64_32.c linux-2.6.32.42/arch/x86/lib/atomic64_32.c
18427--- linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18428+++ linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18429@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18430 }
18431 EXPORT_SYMBOL(atomic64_cmpxchg);
18432
18433+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18434+{
18435+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18436+}
18437+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18438+
18439 /**
18440 * atomic64_xchg - xchg atomic64 variable
18441 * @ptr: pointer to type atomic64_t
18442@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18443 EXPORT_SYMBOL(atomic64_xchg);
18444
18445 /**
18446+ * atomic64_xchg_unchecked - xchg atomic64 variable
18447+ * @ptr: pointer to type atomic64_unchecked_t
18448+ * @new_val: value to assign
18449+ *
18450+ * Atomically xchgs the value of @ptr to @new_val and returns
18451+ * the old value.
18452+ */
18453+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18454+{
18455+ /*
18456+ * Try first with a (possibly incorrect) assumption about
18457+ * what we have there. We'll do two loops most likely,
18458+ * but we'll get an ownership MESI transaction straight away
18459+ * instead of a read transaction followed by a
18460+ * flush-for-ownership transaction:
18461+ */
18462+ u64 old_val, real_val = 0;
18463+
18464+ do {
18465+ old_val = real_val;
18466+
18467+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18468+
18469+ } while (real_val != old_val);
18470+
18471+ return old_val;
18472+}
18473+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18474+
18475+/**
18476 * atomic64_set - set atomic64 variable
18477 * @ptr: pointer to type atomic64_t
18478 * @new_val: value to assign
18479@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18480 EXPORT_SYMBOL(atomic64_set);
18481
18482 /**
18483-EXPORT_SYMBOL(atomic64_read);
17484+ * atomic64_set_unchecked - set atomic64 variable
18485+ * @ptr: pointer to type atomic64_unchecked_t
18486+ * @new_val: value to assign
18487+ *
18488+ * Atomically sets the value of @ptr to @new_val.
18489+ */
18490+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18491+{
18492+ atomic64_xchg_unchecked(ptr, new_val);
18493+}
18494+EXPORT_SYMBOL(atomic64_set_unchecked);
18495+
18496+/**
18497 * atomic64_add_return - add and return
18498 * @delta: integer value to add
18499 * @ptr: pointer to type atomic64_t
18500@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18501 }
18502 EXPORT_SYMBOL(atomic64_add_return);
18503
18504+/**
18505+ * atomic64_add_return_unchecked - add and return
18506+ * @delta: integer value to add
18507+ * @ptr: pointer to type atomic64_unchecked_t
18508+ *
18509+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18510+ */
18511+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18512+{
18513+ /*
18514+ * Try first with a (possibly incorrect) assumption about
18515+ * what we have there. We'll do two loops most likely,
18516+ * but we'll get an ownership MESI transaction straight away
18517+ * instead of a read transaction followed by a
18518+ * flush-for-ownership transaction:
18519+ */
18520+ u64 old_val, new_val, real_val = 0;
18521+
18522+ do {
18523+ old_val = real_val;
18524+ new_val = old_val + delta;
18525+
18526+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18527+
18528+ } while (real_val != old_val);
18529+
18530+ return new_val;
18531+}
18532+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18533+
18534 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18535 {
18536 return atomic64_add_return(-delta, ptr);
18537 }
18538 EXPORT_SYMBOL(atomic64_sub_return);
18539
18540+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18541+{
18542+ return atomic64_add_return_unchecked(-delta, ptr);
18543+}
18544+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18545+
18546 u64 atomic64_inc_return(atomic64_t *ptr)
18547 {
18548 return atomic64_add_return(1, ptr);
18549 }
18550 EXPORT_SYMBOL(atomic64_inc_return);
18551
18552+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18553+{
18554+ return atomic64_add_return_unchecked(1, ptr);
18555+}
18556+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18557+
18558 u64 atomic64_dec_return(atomic64_t *ptr)
18559 {
18560 return atomic64_sub_return(1, ptr);
18561 }
18562 EXPORT_SYMBOL(atomic64_dec_return);
18563
18564+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18565+{
18566+ return atomic64_sub_return_unchecked(1, ptr);
18567+}
18568+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18569+
18570 /**
18571 * atomic64_add - add integer to atomic64 variable
18572 * @delta: integer value to add
18573@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18574 EXPORT_SYMBOL(atomic64_add);
18575
18576 /**
18577+ * atomic64_add_unchecked - add integer to atomic64 variable
18578+ * @delta: integer value to add
18579+ * @ptr: pointer to type atomic64_unchecked_t
18580+ *
18581+ * Atomically adds @delta to @ptr.
18582+ */
18583+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18584+{
18585+ atomic64_add_return_unchecked(delta, ptr);
18586+}
18587+EXPORT_SYMBOL(atomic64_add_unchecked);
18588+
18589+/**
18590 * atomic64_sub - subtract the atomic64 variable
18591 * @delta: integer value to subtract
18592 * @ptr: pointer to type atomic64_t
18593@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18594 EXPORT_SYMBOL(atomic64_sub);
18595
18596 /**
18597+ * atomic64_sub_unchecked - subtract the atomic64 variable
18598+ * @delta: integer value to subtract
18599+ * @ptr: pointer to type atomic64_unchecked_t
18600+ *
18601+ * Atomically subtracts @delta from @ptr.
18602+ */
18603+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18604+{
18605+ atomic64_add_unchecked(-delta, ptr);
18606+}
18607+EXPORT_SYMBOL(atomic64_sub_unchecked);
18608+
18609+/**
18610 * atomic64_sub_and_test - subtract value from variable and test result
18611 * @delta: integer value to subtract
18612 * @ptr: pointer to type atomic64_t
18613@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18614 EXPORT_SYMBOL(atomic64_inc);
18615
18616 /**
18617+ * atomic64_inc_unchecked - increment atomic64 variable
18618+ * @ptr: pointer to type atomic64_unchecked_t
18619+ *
18620+ * Atomically increments @ptr by 1.
18621+ */
18622+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18623+{
18624+ atomic64_add_unchecked(1, ptr);
18625+}
18626+EXPORT_SYMBOL(atomic64_inc_unchecked);
18627+
18628+/**
18629 * atomic64_dec - decrement atomic64 variable
18630 * @ptr: pointer to type atomic64_t
18631 *
18632@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18633 EXPORT_SYMBOL(atomic64_dec);
18634
18635 /**
18636+ * atomic64_dec_unchecked - decrement atomic64 variable
18637+ * @ptr: pointer to type atomic64_unchecked_t
18638+ *
18639+ * Atomically decrements @ptr by 1.
18640+ */
18641+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18642+{
18643+ atomic64_sub_unchecked(1, ptr);
18644+}
18645+EXPORT_SYMBOL(atomic64_dec_unchecked);
18646+
18647+/**
18648 * atomic64_dec_and_test - decrement and test
18649 * @ptr: pointer to type atomic64_t
18650 *
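The atomic64_*_unchecked helpers added above are all built on the same compare-and-swap retry loop: start from a guessed old value, attempt the swap, and retry with whatever value was really there until the guess matches. A standalone sketch of that loop using the GCC/Clang __sync_val_compare_and_swap builtin (not the kernel's cmpxchg8b wrapper; the function name is made up):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t atomic64_add_return_sketch(uint64_t delta, uint64_t *ptr)
    {
        uint64_t old_val, new_val, real_val = 0;

        do {
            old_val = real_val;          /* last value actually observed */
            new_val = old_val + delta;   /* value we want to install */
            /* returns the value that was in *ptr before the attempt */
            real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
        } while (real_val != old_val);   /* somebody raced us: retry */

        return new_val;
    }

    int main(void)
    {
        uint64_t v = 40;

        printf("%llu\n", (unsigned long long)atomic64_add_return_sketch(2, &v));
        return 0;
    }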
18651diff -urNp linux-2.6.32.42/arch/x86/lib/checksum_32.S linux-2.6.32.42/arch/x86/lib/checksum_32.S
18652--- linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18653+++ linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18654@@ -28,7 +28,8 @@
18655 #include <linux/linkage.h>
18656 #include <asm/dwarf2.h>
18657 #include <asm/errno.h>
18658-
18659+#include <asm/segment.h>
18660+
18661 /*
18662 * computes a partial checksum, e.g. for TCP/UDP fragments
18663 */
18664@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18665
18666 #define ARGBASE 16
18667 #define FP 12
18668-
18669-ENTRY(csum_partial_copy_generic)
18670+
18671+ENTRY(csum_partial_copy_generic_to_user)
18672 CFI_STARTPROC
18673+
18674+#ifdef CONFIG_PAX_MEMORY_UDEREF
18675+ pushl %gs
18676+ CFI_ADJUST_CFA_OFFSET 4
18677+ popl %es
18678+ CFI_ADJUST_CFA_OFFSET -4
18679+ jmp csum_partial_copy_generic
18680+#endif
18681+
18682+ENTRY(csum_partial_copy_generic_from_user)
18683+
18684+#ifdef CONFIG_PAX_MEMORY_UDEREF
18685+ pushl %gs
18686+ CFI_ADJUST_CFA_OFFSET 4
18687+ popl %ds
18688+ CFI_ADJUST_CFA_OFFSET -4
18689+#endif
18690+
18691+ENTRY(csum_partial_copy_generic)
18692 subl $4,%esp
18693 CFI_ADJUST_CFA_OFFSET 4
18694 pushl %edi
18695@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18696 jmp 4f
18697 SRC(1: movw (%esi), %bx )
18698 addl $2, %esi
18699-DST( movw %bx, (%edi) )
18700+DST( movw %bx, %es:(%edi) )
18701 addl $2, %edi
18702 addw %bx, %ax
18703 adcl $0, %eax
18704@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18705 SRC(1: movl (%esi), %ebx )
18706 SRC( movl 4(%esi), %edx )
18707 adcl %ebx, %eax
18708-DST( movl %ebx, (%edi) )
18709+DST( movl %ebx, %es:(%edi) )
18710 adcl %edx, %eax
18711-DST( movl %edx, 4(%edi) )
18712+DST( movl %edx, %es:4(%edi) )
18713
18714 SRC( movl 8(%esi), %ebx )
18715 SRC( movl 12(%esi), %edx )
18716 adcl %ebx, %eax
18717-DST( movl %ebx, 8(%edi) )
18718+DST( movl %ebx, %es:8(%edi) )
18719 adcl %edx, %eax
18720-DST( movl %edx, 12(%edi) )
18721+DST( movl %edx, %es:12(%edi) )
18722
18723 SRC( movl 16(%esi), %ebx )
18724 SRC( movl 20(%esi), %edx )
18725 adcl %ebx, %eax
18726-DST( movl %ebx, 16(%edi) )
18727+DST( movl %ebx, %es:16(%edi) )
18728 adcl %edx, %eax
18729-DST( movl %edx, 20(%edi) )
18730+DST( movl %edx, %es:20(%edi) )
18731
18732 SRC( movl 24(%esi), %ebx )
18733 SRC( movl 28(%esi), %edx )
18734 adcl %ebx, %eax
18735-DST( movl %ebx, 24(%edi) )
18736+DST( movl %ebx, %es:24(%edi) )
18737 adcl %edx, %eax
18738-DST( movl %edx, 28(%edi) )
18739+DST( movl %edx, %es:28(%edi) )
18740
18741 lea 32(%esi), %esi
18742 lea 32(%edi), %edi
18743@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18744 shrl $2, %edx # This clears CF
18745 SRC(3: movl (%esi), %ebx )
18746 adcl %ebx, %eax
18747-DST( movl %ebx, (%edi) )
18748+DST( movl %ebx, %es:(%edi) )
18749 lea 4(%esi), %esi
18750 lea 4(%edi), %edi
18751 dec %edx
18752@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18753 jb 5f
18754 SRC( movw (%esi), %cx )
18755 leal 2(%esi), %esi
18756-DST( movw %cx, (%edi) )
18757+DST( movw %cx, %es:(%edi) )
18758 leal 2(%edi), %edi
18759 je 6f
18760 shll $16,%ecx
18761 SRC(5: movb (%esi), %cl )
18762-DST( movb %cl, (%edi) )
18763+DST( movb %cl, %es:(%edi) )
18764 6: addl %ecx, %eax
18765 adcl $0, %eax
18766 7:
18767@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18768
18769 6001:
18770 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18771- movl $-EFAULT, (%ebx)
18772+ movl $-EFAULT, %ss:(%ebx)
18773
18774 # zero the complete destination - computing the rest
18775 # is too much work
18776@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18777
18778 6002:
18779 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18780- movl $-EFAULT,(%ebx)
18781+ movl $-EFAULT,%ss:(%ebx)
18782 jmp 5000b
18783
18784 .previous
18785
18786+ pushl %ss
18787+ CFI_ADJUST_CFA_OFFSET 4
18788+ popl %ds
18789+ CFI_ADJUST_CFA_OFFSET -4
18790+ pushl %ss
18791+ CFI_ADJUST_CFA_OFFSET 4
18792+ popl %es
18793+ CFI_ADJUST_CFA_OFFSET -4
18794 popl %ebx
18795 CFI_ADJUST_CFA_OFFSET -4
18796 CFI_RESTORE ebx
18797@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18798 CFI_ADJUST_CFA_OFFSET -4
18799 ret
18800 CFI_ENDPROC
18801-ENDPROC(csum_partial_copy_generic)
18802+ENDPROC(csum_partial_copy_generic_to_user)
18803
18804 #else
18805
18806 /* Version for PentiumII/PPro */
18807
18808 #define ROUND1(x) \
18809+ nop; nop; nop; \
18810 SRC(movl x(%esi), %ebx ) ; \
18811 addl %ebx, %eax ; \
18812- DST(movl %ebx, x(%edi) ) ;
18813+ DST(movl %ebx, %es:x(%edi)) ;
18814
18815 #define ROUND(x) \
18816+ nop; nop; nop; \
18817 SRC(movl x(%esi), %ebx ) ; \
18818 adcl %ebx, %eax ; \
18819- DST(movl %ebx, x(%edi) ) ;
18820+ DST(movl %ebx, %es:x(%edi)) ;
18821
18822 #define ARGBASE 12
18823-
18824-ENTRY(csum_partial_copy_generic)
18825+
18826+ENTRY(csum_partial_copy_generic_to_user)
18827 CFI_STARTPROC
18828+
18829+#ifdef CONFIG_PAX_MEMORY_UDEREF
18830+ pushl %gs
18831+ CFI_ADJUST_CFA_OFFSET 4
18832+ popl %es
18833+ CFI_ADJUST_CFA_OFFSET -4
18834+ jmp csum_partial_copy_generic
18835+#endif
18836+
18837+ENTRY(csum_partial_copy_generic_from_user)
18838+
18839+#ifdef CONFIG_PAX_MEMORY_UDEREF
18840+ pushl %gs
18841+ CFI_ADJUST_CFA_OFFSET 4
18842+ popl %ds
18843+ CFI_ADJUST_CFA_OFFSET -4
18844+#endif
18845+
18846+ENTRY(csum_partial_copy_generic)
18847 pushl %ebx
18848 CFI_ADJUST_CFA_OFFSET 4
18849 CFI_REL_OFFSET ebx, 0
18850@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18851 subl %ebx, %edi
18852 lea -1(%esi),%edx
18853 andl $-32,%edx
18854- lea 3f(%ebx,%ebx), %ebx
18855+ lea 3f(%ebx,%ebx,2), %ebx
18856 testl %esi, %esi
18857 jmp *%ebx
18858 1: addl $64,%esi
18859@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18860 jb 5f
18861 SRC( movw (%esi), %dx )
18862 leal 2(%esi), %esi
18863-DST( movw %dx, (%edi) )
18864+DST( movw %dx, %es:(%edi) )
18865 leal 2(%edi), %edi
18866 je 6f
18867 shll $16,%edx
18868 5:
18869 SRC( movb (%esi), %dl )
18870-DST( movb %dl, (%edi) )
18871+DST( movb %dl, %es:(%edi) )
18872 6: addl %edx, %eax
18873 adcl $0, %eax
18874 7:
18875 .section .fixup, "ax"
18876 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18877- movl $-EFAULT, (%ebx)
18878+ movl $-EFAULT, %ss:(%ebx)
18879 # zero the complete destination (computing the rest is too much work)
18880 movl ARGBASE+8(%esp),%edi # dst
18881 movl ARGBASE+12(%esp),%ecx # len
18882@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18883 rep; stosb
18884 jmp 7b
18885 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18886- movl $-EFAULT, (%ebx)
18887+ movl $-EFAULT, %ss:(%ebx)
18888 jmp 7b
18889 .previous
18890
18891+#ifdef CONFIG_PAX_MEMORY_UDEREF
18892+ pushl %ss
18893+ CFI_ADJUST_CFA_OFFSET 4
18894+ popl %ds
18895+ CFI_ADJUST_CFA_OFFSET -4
18896+ pushl %ss
18897+ CFI_ADJUST_CFA_OFFSET 4
18898+ popl %es
18899+ CFI_ADJUST_CFA_OFFSET -4
18900+#endif
18901+
18902 popl %esi
18903 CFI_ADJUST_CFA_OFFSET -4
18904 CFI_RESTORE esi
18905@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18906 CFI_RESTORE ebx
18907 ret
18908 CFI_ENDPROC
18909-ENDPROC(csum_partial_copy_generic)
18910+ENDPROC(csum_partial_copy_generic_to_user)
18911
18912 #undef ROUND
18913 #undef ROUND1
18914diff -urNp linux-2.6.32.42/arch/x86/lib/clear_page_64.S linux-2.6.32.42/arch/x86/lib/clear_page_64.S
18915--- linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18916+++ linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18917@@ -43,7 +43,7 @@ ENDPROC(clear_page)
18918
18919 #include <asm/cpufeature.h>
18920
18921- .section .altinstr_replacement,"ax"
18922+ .section .altinstr_replacement,"a"
18923 1: .byte 0xeb /* jmp <disp8> */
18924 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18925 2:
18926diff -urNp linux-2.6.32.42/arch/x86/lib/copy_page_64.S linux-2.6.32.42/arch/x86/lib/copy_page_64.S
18927--- linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18928+++ linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18929@@ -104,7 +104,7 @@ ENDPROC(copy_page)
18930
18931 #include <asm/cpufeature.h>
18932
18933- .section .altinstr_replacement,"ax"
18934+ .section .altinstr_replacement,"a"
18935 1: .byte 0xeb /* jmp <disp8> */
18936 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18937 2:
18938diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_64.S linux-2.6.32.42/arch/x86/lib/copy_user_64.S
18939--- linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18940+++ linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18941@@ -15,13 +15,14 @@
18942 #include <asm/asm-offsets.h>
18943 #include <asm/thread_info.h>
18944 #include <asm/cpufeature.h>
18945+#include <asm/pgtable.h>
18946
18947 .macro ALTERNATIVE_JUMP feature,orig,alt
18948 0:
18949 .byte 0xe9 /* 32bit jump */
18950 .long \orig-1f /* by default jump to orig */
18951 1:
18952- .section .altinstr_replacement,"ax"
18953+ .section .altinstr_replacement,"a"
18954 2: .byte 0xe9 /* near jump with 32bit immediate */
18955 .long \alt-1b /* offset */ /* or alternatively to alt */
18956 .previous
18957@@ -64,49 +65,19 @@
18958 #endif
18959 .endm
18960
18961-/* Standard copy_to_user with segment limit checking */
18962-ENTRY(copy_to_user)
18963- CFI_STARTPROC
18964- GET_THREAD_INFO(%rax)
18965- movq %rdi,%rcx
18966- addq %rdx,%rcx
18967- jc bad_to_user
18968- cmpq TI_addr_limit(%rax),%rcx
18969- ja bad_to_user
18970- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18971- CFI_ENDPROC
18972-ENDPROC(copy_to_user)
18973-
18974-/* Standard copy_from_user with segment limit checking */
18975-ENTRY(copy_from_user)
18976- CFI_STARTPROC
18977- GET_THREAD_INFO(%rax)
18978- movq %rsi,%rcx
18979- addq %rdx,%rcx
18980- jc bad_from_user
18981- cmpq TI_addr_limit(%rax),%rcx
18982- ja bad_from_user
18983- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18984- CFI_ENDPROC
18985-ENDPROC(copy_from_user)
18986-
18987 ENTRY(copy_user_generic)
18988 CFI_STARTPROC
18989 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18990 CFI_ENDPROC
18991 ENDPROC(copy_user_generic)
18992
18993-ENTRY(__copy_from_user_inatomic)
18994- CFI_STARTPROC
18995- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18996- CFI_ENDPROC
18997-ENDPROC(__copy_from_user_inatomic)
18998-
18999 .section .fixup,"ax"
19000 /* must zero dest */
19001 ENTRY(bad_from_user)
19002 bad_from_user:
19003 CFI_STARTPROC
19004+ testl %edx,%edx
19005+ js bad_to_user
19006 movl %edx,%ecx
19007 xorl %eax,%eax
19008 rep
19009diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S
19010--- linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19011+++ linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19012@@ -14,6 +14,7 @@
19013 #include <asm/current.h>
19014 #include <asm/asm-offsets.h>
19015 #include <asm/thread_info.h>
19016+#include <asm/pgtable.h>
19017
19018 .macro ALIGN_DESTINATION
19019 #ifdef FIX_ALIGNMENT
19020@@ -50,6 +51,15 @@
19021 */
19022 ENTRY(__copy_user_nocache)
19023 CFI_STARTPROC
19024+
19025+#ifdef CONFIG_PAX_MEMORY_UDEREF
19026+ mov $PAX_USER_SHADOW_BASE,%rcx
19027+ cmp %rcx,%rsi
19028+ jae 1f
19029+ add %rcx,%rsi
19030+1:
19031+#endif
19032+
19033 cmpl $8,%edx
19034 jb 20f /* less then 8 bytes, go to byte copy loop */
19035 ALIGN_DESTINATION
19036diff -urNp linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c
19037--- linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19038+++ linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19039@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19040 len -= 2;
19041 }
19042 }
19043+
19044+#ifdef CONFIG_PAX_MEMORY_UDEREF
19045+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19046+ src += PAX_USER_SHADOW_BASE;
19047+#endif
19048+
19049 isum = csum_partial_copy_generic((__force const void *)src,
19050 dst, len, isum, errp, NULL);
19051 if (unlikely(*errp))
19052@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19053 }
19054
19055 *errp = 0;
19056+
19057+#ifdef CONFIG_PAX_MEMORY_UDEREF
19058+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19059+ dst += PAX_USER_SHADOW_BASE;
19060+#endif
19061+
19062 return csum_partial_copy_generic(src, (void __force *)dst,
19063 len, isum, NULL, errp);
19064 }
19065diff -urNp linux-2.6.32.42/arch/x86/lib/getuser.S linux-2.6.32.42/arch/x86/lib/getuser.S
19066--- linux-2.6.32.42/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19067+++ linux-2.6.32.42/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19068@@ -33,14 +33,35 @@
19069 #include <asm/asm-offsets.h>
19070 #include <asm/thread_info.h>
19071 #include <asm/asm.h>
19072+#include <asm/segment.h>
19073+#include <asm/pgtable.h>
19074+
19075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19076+#define __copyuser_seg gs;
19077+#else
19078+#define __copyuser_seg
19079+#endif
19080
19081 .text
19082 ENTRY(__get_user_1)
19083 CFI_STARTPROC
19084+
19085+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19086 GET_THREAD_INFO(%_ASM_DX)
19087 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19088 jae bad_get_user
19089-1: movzb (%_ASM_AX),%edx
19090+
19091+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19092+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19093+ cmp %_ASM_DX,%_ASM_AX
19094+ jae 1234f
19095+ add %_ASM_DX,%_ASM_AX
19096+1234:
19097+#endif
19098+
19099+#endif
19100+
19101+1: __copyuser_seg movzb (%_ASM_AX),%edx
19102 xor %eax,%eax
19103 ret
19104 CFI_ENDPROC
19105@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19106 ENTRY(__get_user_2)
19107 CFI_STARTPROC
19108 add $1,%_ASM_AX
19109+
19110+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19111 jc bad_get_user
19112 GET_THREAD_INFO(%_ASM_DX)
19113 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19114 jae bad_get_user
19115-2: movzwl -1(%_ASM_AX),%edx
19116+
19117+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19118+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19119+ cmp %_ASM_DX,%_ASM_AX
19120+ jae 1234f
19121+ add %_ASM_DX,%_ASM_AX
19122+1234:
19123+#endif
19124+
19125+#endif
19126+
19127+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19128 xor %eax,%eax
19129 ret
19130 CFI_ENDPROC
19131@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19132 ENTRY(__get_user_4)
19133 CFI_STARTPROC
19134 add $3,%_ASM_AX
19135+
19136+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19137 jc bad_get_user
19138 GET_THREAD_INFO(%_ASM_DX)
19139 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19140 jae bad_get_user
19141-3: mov -3(%_ASM_AX),%edx
19142+
19143+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19144+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19145+ cmp %_ASM_DX,%_ASM_AX
19146+ jae 1234f
19147+ add %_ASM_DX,%_ASM_AX
19148+1234:
19149+#endif
19150+
19151+#endif
19152+
19153+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19154 xor %eax,%eax
19155 ret
19156 CFI_ENDPROC
19157@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19158 GET_THREAD_INFO(%_ASM_DX)
19159 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19160 jae bad_get_user
19161+
19162+#ifdef CONFIG_PAX_MEMORY_UDEREF
19163+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19164+ cmp %_ASM_DX,%_ASM_AX
19165+ jae 1234f
19166+ add %_ASM_DX,%_ASM_AX
19167+1234:
19168+#endif
19169+
19170 4: movq -7(%_ASM_AX),%_ASM_DX
19171 xor %eax,%eax
19172 ret
19173diff -urNp linux-2.6.32.42/arch/x86/lib/memcpy_64.S linux-2.6.32.42/arch/x86/lib/memcpy_64.S
19174--- linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19175+++ linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19176@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19177 * It is also a lot simpler. Use this when possible:
19178 */
19179
19180- .section .altinstr_replacement, "ax"
19181+ .section .altinstr_replacement, "a"
19182 1: .byte 0xeb /* jmp <disp8> */
19183 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19184 2:
19185diff -urNp linux-2.6.32.42/arch/x86/lib/memset_64.S linux-2.6.32.42/arch/x86/lib/memset_64.S
19186--- linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19187+++ linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19188@@ -118,7 +118,7 @@ ENDPROC(__memset)
19189
19190 #include <asm/cpufeature.h>
19191
19192- .section .altinstr_replacement,"ax"
19193+ .section .altinstr_replacement,"a"
19194 1: .byte 0xeb /* jmp <disp8> */
19195 .byte (memset_c - memset) - (2f - 1b) /* offset */
19196 2:
19197diff -urNp linux-2.6.32.42/arch/x86/lib/mmx_32.c linux-2.6.32.42/arch/x86/lib/mmx_32.c
19198--- linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19199+++ linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19200@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19201 {
19202 void *p;
19203 int i;
19204+ unsigned long cr0;
19205
19206 if (unlikely(in_interrupt()))
19207 return __memcpy(to, from, len);
19208@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19209 kernel_fpu_begin();
19210
19211 __asm__ __volatile__ (
19212- "1: prefetch (%0)\n" /* This set is 28 bytes */
19213- " prefetch 64(%0)\n"
19214- " prefetch 128(%0)\n"
19215- " prefetch 192(%0)\n"
19216- " prefetch 256(%0)\n"
19217+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19218+ " prefetch 64(%1)\n"
19219+ " prefetch 128(%1)\n"
19220+ " prefetch 192(%1)\n"
19221+ " prefetch 256(%1)\n"
19222 "2: \n"
19223 ".section .fixup, \"ax\"\n"
19224- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19225+ "3: \n"
19226+
19227+#ifdef CONFIG_PAX_KERNEXEC
19228+ " movl %%cr0, %0\n"
19229+ " movl %0, %%eax\n"
19230+ " andl $0xFFFEFFFF, %%eax\n"
19231+ " movl %%eax, %%cr0\n"
19232+#endif
19233+
19234+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19235+
19236+#ifdef CONFIG_PAX_KERNEXEC
19237+ " movl %0, %%cr0\n"
19238+#endif
19239+
19240 " jmp 2b\n"
19241 ".previous\n"
19242 _ASM_EXTABLE(1b, 3b)
19243- : : "r" (from));
19244+ : "=&r" (cr0) : "r" (from) : "ax");
19245
19246 for ( ; i > 5; i--) {
19247 __asm__ __volatile__ (
19248- "1: prefetch 320(%0)\n"
19249- "2: movq (%0), %%mm0\n"
19250- " movq 8(%0), %%mm1\n"
19251- " movq 16(%0), %%mm2\n"
19252- " movq 24(%0), %%mm3\n"
19253- " movq %%mm0, (%1)\n"
19254- " movq %%mm1, 8(%1)\n"
19255- " movq %%mm2, 16(%1)\n"
19256- " movq %%mm3, 24(%1)\n"
19257- " movq 32(%0), %%mm0\n"
19258- " movq 40(%0), %%mm1\n"
19259- " movq 48(%0), %%mm2\n"
19260- " movq 56(%0), %%mm3\n"
19261- " movq %%mm0, 32(%1)\n"
19262- " movq %%mm1, 40(%1)\n"
19263- " movq %%mm2, 48(%1)\n"
19264- " movq %%mm3, 56(%1)\n"
19265+ "1: prefetch 320(%1)\n"
19266+ "2: movq (%1), %%mm0\n"
19267+ " movq 8(%1), %%mm1\n"
19268+ " movq 16(%1), %%mm2\n"
19269+ " movq 24(%1), %%mm3\n"
19270+ " movq %%mm0, (%2)\n"
19271+ " movq %%mm1, 8(%2)\n"
19272+ " movq %%mm2, 16(%2)\n"
19273+ " movq %%mm3, 24(%2)\n"
19274+ " movq 32(%1), %%mm0\n"
19275+ " movq 40(%1), %%mm1\n"
19276+ " movq 48(%1), %%mm2\n"
19277+ " movq 56(%1), %%mm3\n"
19278+ " movq %%mm0, 32(%2)\n"
19279+ " movq %%mm1, 40(%2)\n"
19280+ " movq %%mm2, 48(%2)\n"
19281+ " movq %%mm3, 56(%2)\n"
19282 ".section .fixup, \"ax\"\n"
19283- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19284+ "3:\n"
19285+
19286+#ifdef CONFIG_PAX_KERNEXEC
19287+ " movl %%cr0, %0\n"
19288+ " movl %0, %%eax\n"
19289+ " andl $0xFFFEFFFF, %%eax\n"
19290+ " movl %%eax, %%cr0\n"
19291+#endif
19292+
19293+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19294+
19295+#ifdef CONFIG_PAX_KERNEXEC
19296+ " movl %0, %%cr0\n"
19297+#endif
19298+
19299 " jmp 2b\n"
19300 ".previous\n"
19301 _ASM_EXTABLE(1b, 3b)
19302- : : "r" (from), "r" (to) : "memory");
19303+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19304
19305 from += 64;
19306 to += 64;
19307@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19308 static void fast_copy_page(void *to, void *from)
19309 {
19310 int i;
19311+ unsigned long cr0;
19312
19313 kernel_fpu_begin();
19314
19315@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19316 * but that is for later. -AV
19317 */
19318 __asm__ __volatile__(
19319- "1: prefetch (%0)\n"
19320- " prefetch 64(%0)\n"
19321- " prefetch 128(%0)\n"
19322- " prefetch 192(%0)\n"
19323- " prefetch 256(%0)\n"
19324+ "1: prefetch (%1)\n"
19325+ " prefetch 64(%1)\n"
19326+ " prefetch 128(%1)\n"
19327+ " prefetch 192(%1)\n"
19328+ " prefetch 256(%1)\n"
19329 "2: \n"
19330 ".section .fixup, \"ax\"\n"
19331- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19332+ "3: \n"
19333+
19334+#ifdef CONFIG_PAX_KERNEXEC
19335+ " movl %%cr0, %0\n"
19336+ " movl %0, %%eax\n"
19337+ " andl $0xFFFEFFFF, %%eax\n"
19338+ " movl %%eax, %%cr0\n"
19339+#endif
19340+
19341+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19342+
19343+#ifdef CONFIG_PAX_KERNEXEC
19344+ " movl %0, %%cr0\n"
19345+#endif
19346+
19347 " jmp 2b\n"
19348 ".previous\n"
19349- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19350+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19351
19352 for (i = 0; i < (4096-320)/64; i++) {
19353 __asm__ __volatile__ (
19354- "1: prefetch 320(%0)\n"
19355- "2: movq (%0), %%mm0\n"
19356- " movntq %%mm0, (%1)\n"
19357- " movq 8(%0), %%mm1\n"
19358- " movntq %%mm1, 8(%1)\n"
19359- " movq 16(%0), %%mm2\n"
19360- " movntq %%mm2, 16(%1)\n"
19361- " movq 24(%0), %%mm3\n"
19362- " movntq %%mm3, 24(%1)\n"
19363- " movq 32(%0), %%mm4\n"
19364- " movntq %%mm4, 32(%1)\n"
19365- " movq 40(%0), %%mm5\n"
19366- " movntq %%mm5, 40(%1)\n"
19367- " movq 48(%0), %%mm6\n"
19368- " movntq %%mm6, 48(%1)\n"
19369- " movq 56(%0), %%mm7\n"
19370- " movntq %%mm7, 56(%1)\n"
19371+ "1: prefetch 320(%1)\n"
19372+ "2: movq (%1), %%mm0\n"
19373+ " movntq %%mm0, (%2)\n"
19374+ " movq 8(%1), %%mm1\n"
19375+ " movntq %%mm1, 8(%2)\n"
19376+ " movq 16(%1), %%mm2\n"
19377+ " movntq %%mm2, 16(%2)\n"
19378+ " movq 24(%1), %%mm3\n"
19379+ " movntq %%mm3, 24(%2)\n"
19380+ " movq 32(%1), %%mm4\n"
19381+ " movntq %%mm4, 32(%2)\n"
19382+ " movq 40(%1), %%mm5\n"
19383+ " movntq %%mm5, 40(%2)\n"
19384+ " movq 48(%1), %%mm6\n"
19385+ " movntq %%mm6, 48(%2)\n"
19386+ " movq 56(%1), %%mm7\n"
19387+ " movntq %%mm7, 56(%2)\n"
19388 ".section .fixup, \"ax\"\n"
19389- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19390+ "3:\n"
19391+
19392+#ifdef CONFIG_PAX_KERNEXEC
19393+ " movl %%cr0, %0\n"
19394+ " movl %0, %%eax\n"
19395+ " andl $0xFFFEFFFF, %%eax\n"
19396+ " movl %%eax, %%cr0\n"
19397+#endif
19398+
19399+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19400+
19401+#ifdef CONFIG_PAX_KERNEXEC
19402+ " movl %0, %%cr0\n"
19403+#endif
19404+
19405 " jmp 2b\n"
19406 ".previous\n"
19407- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19408+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19409
19410 from += 64;
19411 to += 64;
19412@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19413 static void fast_copy_page(void *to, void *from)
19414 {
19415 int i;
19416+ unsigned long cr0;
19417
19418 kernel_fpu_begin();
19419
19420 __asm__ __volatile__ (
19421- "1: prefetch (%0)\n"
19422- " prefetch 64(%0)\n"
19423- " prefetch 128(%0)\n"
19424- " prefetch 192(%0)\n"
19425- " prefetch 256(%0)\n"
19426+ "1: prefetch (%1)\n"
19427+ " prefetch 64(%1)\n"
19428+ " prefetch 128(%1)\n"
19429+ " prefetch 192(%1)\n"
19430+ " prefetch 256(%1)\n"
19431 "2: \n"
19432 ".section .fixup, \"ax\"\n"
19433- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19434+ "3: \n"
19435+
19436+#ifdef CONFIG_PAX_KERNEXEC
19437+ " movl %%cr0, %0\n"
19438+ " movl %0, %%eax\n"
19439+ " andl $0xFFFEFFFF, %%eax\n"
19440+ " movl %%eax, %%cr0\n"
19441+#endif
19442+
19443+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19444+
19445+#ifdef CONFIG_PAX_KERNEXEC
19446+ " movl %0, %%cr0\n"
19447+#endif
19448+
19449 " jmp 2b\n"
19450 ".previous\n"
19451- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19452+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19453
19454 for (i = 0; i < 4096/64; i++) {
19455 __asm__ __volatile__ (
19456- "1: prefetch 320(%0)\n"
19457- "2: movq (%0), %%mm0\n"
19458- " movq 8(%0), %%mm1\n"
19459- " movq 16(%0), %%mm2\n"
19460- " movq 24(%0), %%mm3\n"
19461- " movq %%mm0, (%1)\n"
19462- " movq %%mm1, 8(%1)\n"
19463- " movq %%mm2, 16(%1)\n"
19464- " movq %%mm3, 24(%1)\n"
19465- " movq 32(%0), %%mm0\n"
19466- " movq 40(%0), %%mm1\n"
19467- " movq 48(%0), %%mm2\n"
19468- " movq 56(%0), %%mm3\n"
19469- " movq %%mm0, 32(%1)\n"
19470- " movq %%mm1, 40(%1)\n"
19471- " movq %%mm2, 48(%1)\n"
19472- " movq %%mm3, 56(%1)\n"
19473+ "1: prefetch 320(%1)\n"
19474+ "2: movq (%1), %%mm0\n"
19475+ " movq 8(%1), %%mm1\n"
19476+ " movq 16(%1), %%mm2\n"
19477+ " movq 24(%1), %%mm3\n"
19478+ " movq %%mm0, (%2)\n"
19479+ " movq %%mm1, 8(%2)\n"
19480+ " movq %%mm2, 16(%2)\n"
19481+ " movq %%mm3, 24(%2)\n"
19482+ " movq 32(%1), %%mm0\n"
19483+ " movq 40(%1), %%mm1\n"
19484+ " movq 48(%1), %%mm2\n"
19485+ " movq 56(%1), %%mm3\n"
19486+ " movq %%mm0, 32(%2)\n"
19487+ " movq %%mm1, 40(%2)\n"
19488+ " movq %%mm2, 48(%2)\n"
19489+ " movq %%mm3, 56(%2)\n"
19490 ".section .fixup, \"ax\"\n"
19491- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19492+ "3:\n"
19493+
19494+#ifdef CONFIG_PAX_KERNEXEC
19495+ " movl %%cr0, %0\n"
19496+ " movl %0, %%eax\n"
19497+ " andl $0xFFFEFFFF, %%eax\n"
19498+ " movl %%eax, %%cr0\n"
19499+#endif
19500+
19501+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19502+
19503+#ifdef CONFIG_PAX_KERNEXEC
19504+ " movl %0, %%cr0\n"
19505+#endif
19506+
19507 " jmp 2b\n"
19508 ".previous\n"
19509 _ASM_EXTABLE(1b, 3b)
19510- : : "r" (from), "r" (to) : "memory");
19511+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19512
19513 from += 64;
19514 to += 64;
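The fixup blocks added to mmx_32.c above all wrap a single self-patching movw in a CR0 round-trip: with CONFIG_PAX_KERNEXEC the kernel's own text is write-protected, so the exception handler first clears CR0.WP (bit 16, hence the 0xFFFEFFFF mask), rewrites the faulting prefetch into a short jmp, and then restores the saved CR0 value. Below is a minimal, ring-0-only sketch of that open/close pair; the helper names are illustrative and not taken from the patch.

static inline unsigned long kernexec_open_wp(void)
{
	unsigned long cr0;

	/* save CR0, then clear bit 16 (WP) so read-only kernel text becomes writable */
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & 0xFFFEFFFFUL) : "memory");
	return cr0;
}

static inline void kernexec_close_wp(unsigned long cr0)
{
	/* restore the saved value, re-enabling write protection */
	asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory");
}

Saving and restoring the old value, rather than unconditionally setting WP afterwards, is why the patched asm statements above gained an extra "=&r" (cr0) output operand, with eax clobbered as scratch for the masked value.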
19515diff -urNp linux-2.6.32.42/arch/x86/lib/putuser.S linux-2.6.32.42/arch/x86/lib/putuser.S
19516--- linux-2.6.32.42/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19517+++ linux-2.6.32.42/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19518@@ -15,7 +15,8 @@
19519 #include <asm/thread_info.h>
19520 #include <asm/errno.h>
19521 #include <asm/asm.h>
19522-
19523+#include <asm/segment.h>
19524+#include <asm/pgtable.h>
19525
19526 /*
19527 * __put_user_X
19528@@ -29,52 +30,119 @@
19529 * as they get called from within inline assembly.
19530 */
19531
19532-#define ENTER CFI_STARTPROC ; \
19533- GET_THREAD_INFO(%_ASM_BX)
19534+#define ENTER CFI_STARTPROC
19535 #define EXIT ret ; \
19536 CFI_ENDPROC
19537
19538+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19539+#define _DEST %_ASM_CX,%_ASM_BX
19540+#else
19541+#define _DEST %_ASM_CX
19542+#endif
19543+
19544+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19545+#define __copyuser_seg gs;
19546+#else
19547+#define __copyuser_seg
19548+#endif
19549+
19550 .text
19551 ENTRY(__put_user_1)
19552 ENTER
19553+
19554+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19555+ GET_THREAD_INFO(%_ASM_BX)
19556 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19557 jae bad_put_user
19558-1: movb %al,(%_ASM_CX)
19559+
19560+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19561+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19562+ cmp %_ASM_BX,%_ASM_CX
19563+ jb 1234f
19564+ xor %ebx,%ebx
19565+1234:
19566+#endif
19567+
19568+#endif
19569+
19570+1: __copyuser_seg movb %al,(_DEST)
19571 xor %eax,%eax
19572 EXIT
19573 ENDPROC(__put_user_1)
19574
19575 ENTRY(__put_user_2)
19576 ENTER
19577+
19578+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19579+ GET_THREAD_INFO(%_ASM_BX)
19580 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19581 sub $1,%_ASM_BX
19582 cmp %_ASM_BX,%_ASM_CX
19583 jae bad_put_user
19584-2: movw %ax,(%_ASM_CX)
19585+
19586+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19587+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19588+ cmp %_ASM_BX,%_ASM_CX
19589+ jb 1234f
19590+ xor %ebx,%ebx
19591+1234:
19592+#endif
19593+
19594+#endif
19595+
19596+2: __copyuser_seg movw %ax,(_DEST)
19597 xor %eax,%eax
19598 EXIT
19599 ENDPROC(__put_user_2)
19600
19601 ENTRY(__put_user_4)
19602 ENTER
19603+
19604+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19605+ GET_THREAD_INFO(%_ASM_BX)
19606 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19607 sub $3,%_ASM_BX
19608 cmp %_ASM_BX,%_ASM_CX
19609 jae bad_put_user
19610-3: movl %eax,(%_ASM_CX)
19611+
19612+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19613+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19614+ cmp %_ASM_BX,%_ASM_CX
19615+ jb 1234f
19616+ xor %ebx,%ebx
19617+1234:
19618+#endif
19619+
19620+#endif
19621+
19622+3: __copyuser_seg movl %eax,(_DEST)
19623 xor %eax,%eax
19624 EXIT
19625 ENDPROC(__put_user_4)
19626
19627 ENTRY(__put_user_8)
19628 ENTER
19629+
19630+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19631+ GET_THREAD_INFO(%_ASM_BX)
19632 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19633 sub $7,%_ASM_BX
19634 cmp %_ASM_BX,%_ASM_CX
19635 jae bad_put_user
19636-4: mov %_ASM_AX,(%_ASM_CX)
19637+
19638+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19639+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19640+ cmp %_ASM_BX,%_ASM_CX
19641+ jb 1234f
19642+ xor %ebx,%ebx
19643+1234:
19644+#endif
19645+
19646+#endif
19647+
19648+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19649 #ifdef CONFIG_X86_32
19650-5: movl %edx,4(%_ASM_CX)
19651+5: __copyuser_seg movl %edx,4(_DEST)
19652 #endif
19653 xor %eax,%eax
19654 EXIT
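On amd64 with CONFIG_PAX_MEMORY_UDEREF, the __put_user_N entry points above no longer store through the raw pointer: they compare it against PAX_USER_SHADOW_BASE and, for genuine userland addresses, add the shadow offset via the (%_ASM_CX,%_ASM_BX) addressing mode, while pointers already above the base (reached through set_fs(KERNEL_DS)) keep a zero index. The same adjustment appears in C form later in the usercopy_64.c hunks. A hedged C sketch of the decision, using an assumed placeholder value for the base:

/* placeholder only; the real PAX_USER_SHADOW_BASE is defined elsewhere in the patch */
#define SHADOW_BASE_EXAMPLE 0x0000100000000000UL

static inline unsigned long uderef_adjust(unsigned long addr)
{
	/* userland addresses are shifted up into the user shadow area */
	if (addr < SHADOW_BASE_EXAMPLE)
		addr += SHADOW_BASE_EXAMPLE;
	/* addresses at or above the base (kernel-space destinations) pass through untouched */
	return addr;
}

Using base+index addressing in the asm lets one instruction serve both cases: %_ASM_BX holds either the shadow base or zero.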
19655diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_32.c linux-2.6.32.42/arch/x86/lib/usercopy_32.c
19656--- linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19657+++ linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19658@@ -43,7 +43,7 @@ do { \
19659 __asm__ __volatile__( \
19660 " testl %1,%1\n" \
19661 " jz 2f\n" \
19662- "0: lodsb\n" \
19663+ "0: "__copyuser_seg"lodsb\n" \
19664 " stosb\n" \
19665 " testb %%al,%%al\n" \
19666 " jz 1f\n" \
19667@@ -128,10 +128,12 @@ do { \
19668 int __d0; \
19669 might_fault(); \
19670 __asm__ __volatile__( \
19671+ __COPYUSER_SET_ES \
19672 "0: rep; stosl\n" \
19673 " movl %2,%0\n" \
19674 "1: rep; stosb\n" \
19675 "2:\n" \
19676+ __COPYUSER_RESTORE_ES \
19677 ".section .fixup,\"ax\"\n" \
19678 "3: lea 0(%2,%0,4),%0\n" \
19679 " jmp 2b\n" \
19680@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19681 might_fault();
19682
19683 __asm__ __volatile__(
19684+ __COPYUSER_SET_ES
19685 " testl %0, %0\n"
19686 " jz 3f\n"
19687 " andl %0,%%ecx\n"
19688@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19689 " subl %%ecx,%0\n"
19690 " addl %0,%%eax\n"
19691 "1:\n"
19692+ __COPYUSER_RESTORE_ES
19693 ".section .fixup,\"ax\"\n"
19694 "2: xorl %%eax,%%eax\n"
19695 " jmp 1b\n"
19696@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19697
19698 #ifdef CONFIG_X86_INTEL_USERCOPY
19699 static unsigned long
19700-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19701+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19702 {
19703 int d0, d1;
19704 __asm__ __volatile__(
19705@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19706 " .align 2,0x90\n"
19707 "3: movl 0(%4), %%eax\n"
19708 "4: movl 4(%4), %%edx\n"
19709- "5: movl %%eax, 0(%3)\n"
19710- "6: movl %%edx, 4(%3)\n"
19711+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19712+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19713 "7: movl 8(%4), %%eax\n"
19714 "8: movl 12(%4),%%edx\n"
19715- "9: movl %%eax, 8(%3)\n"
19716- "10: movl %%edx, 12(%3)\n"
19717+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19718+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19719 "11: movl 16(%4), %%eax\n"
19720 "12: movl 20(%4), %%edx\n"
19721- "13: movl %%eax, 16(%3)\n"
19722- "14: movl %%edx, 20(%3)\n"
19723+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19724+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19725 "15: movl 24(%4), %%eax\n"
19726 "16: movl 28(%4), %%edx\n"
19727- "17: movl %%eax, 24(%3)\n"
19728- "18: movl %%edx, 28(%3)\n"
19729+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19730+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19731 "19: movl 32(%4), %%eax\n"
19732 "20: movl 36(%4), %%edx\n"
19733- "21: movl %%eax, 32(%3)\n"
19734- "22: movl %%edx, 36(%3)\n"
19735+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19736+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19737 "23: movl 40(%4), %%eax\n"
19738 "24: movl 44(%4), %%edx\n"
19739- "25: movl %%eax, 40(%3)\n"
19740- "26: movl %%edx, 44(%3)\n"
19741+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19742+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19743 "27: movl 48(%4), %%eax\n"
19744 "28: movl 52(%4), %%edx\n"
19745- "29: movl %%eax, 48(%3)\n"
19746- "30: movl %%edx, 52(%3)\n"
19747+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19748+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19749 "31: movl 56(%4), %%eax\n"
19750 "32: movl 60(%4), %%edx\n"
19751- "33: movl %%eax, 56(%3)\n"
19752- "34: movl %%edx, 60(%3)\n"
19753+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19754+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19755 " addl $-64, %0\n"
19756 " addl $64, %4\n"
19757 " addl $64, %3\n"
19758@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19759 " shrl $2, %0\n"
19760 " andl $3, %%eax\n"
19761 " cld\n"
19762+ __COPYUSER_SET_ES
19763 "99: rep; movsl\n"
19764 "36: movl %%eax, %0\n"
19765 "37: rep; movsb\n"
19766 "100:\n"
19767+ __COPYUSER_RESTORE_ES
19768+ ".section .fixup,\"ax\"\n"
19769+ "101: lea 0(%%eax,%0,4),%0\n"
19770+ " jmp 100b\n"
19771+ ".previous\n"
19772+ ".section __ex_table,\"a\"\n"
19773+ " .align 4\n"
19774+ " .long 1b,100b\n"
19775+ " .long 2b,100b\n"
19776+ " .long 3b,100b\n"
19777+ " .long 4b,100b\n"
19778+ " .long 5b,100b\n"
19779+ " .long 6b,100b\n"
19780+ " .long 7b,100b\n"
19781+ " .long 8b,100b\n"
19782+ " .long 9b,100b\n"
19783+ " .long 10b,100b\n"
19784+ " .long 11b,100b\n"
19785+ " .long 12b,100b\n"
19786+ " .long 13b,100b\n"
19787+ " .long 14b,100b\n"
19788+ " .long 15b,100b\n"
19789+ " .long 16b,100b\n"
19790+ " .long 17b,100b\n"
19791+ " .long 18b,100b\n"
19792+ " .long 19b,100b\n"
19793+ " .long 20b,100b\n"
19794+ " .long 21b,100b\n"
19795+ " .long 22b,100b\n"
19796+ " .long 23b,100b\n"
19797+ " .long 24b,100b\n"
19798+ " .long 25b,100b\n"
19799+ " .long 26b,100b\n"
19800+ " .long 27b,100b\n"
19801+ " .long 28b,100b\n"
19802+ " .long 29b,100b\n"
19803+ " .long 30b,100b\n"
19804+ " .long 31b,100b\n"
19805+ " .long 32b,100b\n"
19806+ " .long 33b,100b\n"
19807+ " .long 34b,100b\n"
19808+ " .long 35b,100b\n"
19809+ " .long 36b,100b\n"
19810+ " .long 37b,100b\n"
19811+ " .long 99b,101b\n"
19812+ ".previous"
19813+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19814+ : "1"(to), "2"(from), "0"(size)
19815+ : "eax", "edx", "memory");
19816+ return size;
19817+}
19818+
19819+static unsigned long
19820+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19821+{
19822+ int d0, d1;
19823+ __asm__ __volatile__(
19824+ " .align 2,0x90\n"
19825+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19826+ " cmpl $67, %0\n"
19827+ " jbe 3f\n"
19828+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19829+ " .align 2,0x90\n"
19830+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19831+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19832+ "5: movl %%eax, 0(%3)\n"
19833+ "6: movl %%edx, 4(%3)\n"
19834+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19835+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19836+ "9: movl %%eax, 8(%3)\n"
19837+ "10: movl %%edx, 12(%3)\n"
19838+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19839+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19840+ "13: movl %%eax, 16(%3)\n"
19841+ "14: movl %%edx, 20(%3)\n"
19842+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19843+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19844+ "17: movl %%eax, 24(%3)\n"
19845+ "18: movl %%edx, 28(%3)\n"
19846+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19847+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19848+ "21: movl %%eax, 32(%3)\n"
19849+ "22: movl %%edx, 36(%3)\n"
19850+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19851+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19852+ "25: movl %%eax, 40(%3)\n"
19853+ "26: movl %%edx, 44(%3)\n"
19854+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19855+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19856+ "29: movl %%eax, 48(%3)\n"
19857+ "30: movl %%edx, 52(%3)\n"
19858+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19859+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19860+ "33: movl %%eax, 56(%3)\n"
19861+ "34: movl %%edx, 60(%3)\n"
19862+ " addl $-64, %0\n"
19863+ " addl $64, %4\n"
19864+ " addl $64, %3\n"
19865+ " cmpl $63, %0\n"
19866+ " ja 1b\n"
19867+ "35: movl %0, %%eax\n"
19868+ " shrl $2, %0\n"
19869+ " andl $3, %%eax\n"
19870+ " cld\n"
19871+ "99: rep; "__copyuser_seg" movsl\n"
19872+ "36: movl %%eax, %0\n"
19873+ "37: rep; "__copyuser_seg" movsb\n"
19874+ "100:\n"
19875 ".section .fixup,\"ax\"\n"
19876 "101: lea 0(%%eax,%0,4),%0\n"
19877 " jmp 100b\n"
19878@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19879 int d0, d1;
19880 __asm__ __volatile__(
19881 " .align 2,0x90\n"
19882- "0: movl 32(%4), %%eax\n"
19883+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19884 " cmpl $67, %0\n"
19885 " jbe 2f\n"
19886- "1: movl 64(%4), %%eax\n"
19887+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19888 " .align 2,0x90\n"
19889- "2: movl 0(%4), %%eax\n"
19890- "21: movl 4(%4), %%edx\n"
19891+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19892+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19893 " movl %%eax, 0(%3)\n"
19894 " movl %%edx, 4(%3)\n"
19895- "3: movl 8(%4), %%eax\n"
19896- "31: movl 12(%4),%%edx\n"
19897+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19898+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19899 " movl %%eax, 8(%3)\n"
19900 " movl %%edx, 12(%3)\n"
19901- "4: movl 16(%4), %%eax\n"
19902- "41: movl 20(%4), %%edx\n"
19903+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19904+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19905 " movl %%eax, 16(%3)\n"
19906 " movl %%edx, 20(%3)\n"
19907- "10: movl 24(%4), %%eax\n"
19908- "51: movl 28(%4), %%edx\n"
19909+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19910+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19911 " movl %%eax, 24(%3)\n"
19912 " movl %%edx, 28(%3)\n"
19913- "11: movl 32(%4), %%eax\n"
19914- "61: movl 36(%4), %%edx\n"
19915+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19916+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19917 " movl %%eax, 32(%3)\n"
19918 " movl %%edx, 36(%3)\n"
19919- "12: movl 40(%4), %%eax\n"
19920- "71: movl 44(%4), %%edx\n"
19921+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19922+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19923 " movl %%eax, 40(%3)\n"
19924 " movl %%edx, 44(%3)\n"
19925- "13: movl 48(%4), %%eax\n"
19926- "81: movl 52(%4), %%edx\n"
19927+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19928+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19929 " movl %%eax, 48(%3)\n"
19930 " movl %%edx, 52(%3)\n"
19931- "14: movl 56(%4), %%eax\n"
19932- "91: movl 60(%4), %%edx\n"
19933+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19934+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19935 " movl %%eax, 56(%3)\n"
19936 " movl %%edx, 60(%3)\n"
19937 " addl $-64, %0\n"
19938@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19939 " shrl $2, %0\n"
19940 " andl $3, %%eax\n"
19941 " cld\n"
19942- "6: rep; movsl\n"
19943+ "6: rep; "__copyuser_seg" movsl\n"
19944 " movl %%eax,%0\n"
19945- "7: rep; movsb\n"
19946+ "7: rep; "__copyuser_seg" movsb\n"
19947 "8:\n"
19948 ".section .fixup,\"ax\"\n"
19949 "9: lea 0(%%eax,%0,4),%0\n"
19950@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19951
19952 __asm__ __volatile__(
19953 " .align 2,0x90\n"
19954- "0: movl 32(%4), %%eax\n"
19955+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19956 " cmpl $67, %0\n"
19957 " jbe 2f\n"
19958- "1: movl 64(%4), %%eax\n"
19959+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19960 " .align 2,0x90\n"
19961- "2: movl 0(%4), %%eax\n"
19962- "21: movl 4(%4), %%edx\n"
19963+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19964+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19965 " movnti %%eax, 0(%3)\n"
19966 " movnti %%edx, 4(%3)\n"
19967- "3: movl 8(%4), %%eax\n"
19968- "31: movl 12(%4),%%edx\n"
19969+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19970+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19971 " movnti %%eax, 8(%3)\n"
19972 " movnti %%edx, 12(%3)\n"
19973- "4: movl 16(%4), %%eax\n"
19974- "41: movl 20(%4), %%edx\n"
19975+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19976+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19977 " movnti %%eax, 16(%3)\n"
19978 " movnti %%edx, 20(%3)\n"
19979- "10: movl 24(%4), %%eax\n"
19980- "51: movl 28(%4), %%edx\n"
19981+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19982+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19983 " movnti %%eax, 24(%3)\n"
19984 " movnti %%edx, 28(%3)\n"
19985- "11: movl 32(%4), %%eax\n"
19986- "61: movl 36(%4), %%edx\n"
19987+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19988+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19989 " movnti %%eax, 32(%3)\n"
19990 " movnti %%edx, 36(%3)\n"
19991- "12: movl 40(%4), %%eax\n"
19992- "71: movl 44(%4), %%edx\n"
19993+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19994+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19995 " movnti %%eax, 40(%3)\n"
19996 " movnti %%edx, 44(%3)\n"
19997- "13: movl 48(%4), %%eax\n"
19998- "81: movl 52(%4), %%edx\n"
19999+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20000+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20001 " movnti %%eax, 48(%3)\n"
20002 " movnti %%edx, 52(%3)\n"
20003- "14: movl 56(%4), %%eax\n"
20004- "91: movl 60(%4), %%edx\n"
20005+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20006+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20007 " movnti %%eax, 56(%3)\n"
20008 " movnti %%edx, 60(%3)\n"
20009 " addl $-64, %0\n"
20010@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20011 " shrl $2, %0\n"
20012 " andl $3, %%eax\n"
20013 " cld\n"
20014- "6: rep; movsl\n"
20015+ "6: rep; "__copyuser_seg" movsl\n"
20016 " movl %%eax,%0\n"
20017- "7: rep; movsb\n"
20018+ "7: rep; "__copyuser_seg" movsb\n"
20019 "8:\n"
20020 ".section .fixup,\"ax\"\n"
20021 "9: lea 0(%%eax,%0,4),%0\n"
20022@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20023
20024 __asm__ __volatile__(
20025 " .align 2,0x90\n"
20026- "0: movl 32(%4), %%eax\n"
20027+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20028 " cmpl $67, %0\n"
20029 " jbe 2f\n"
20030- "1: movl 64(%4), %%eax\n"
20031+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20032 " .align 2,0x90\n"
20033- "2: movl 0(%4), %%eax\n"
20034- "21: movl 4(%4), %%edx\n"
20035+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20036+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20037 " movnti %%eax, 0(%3)\n"
20038 " movnti %%edx, 4(%3)\n"
20039- "3: movl 8(%4), %%eax\n"
20040- "31: movl 12(%4),%%edx\n"
20041+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20042+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20043 " movnti %%eax, 8(%3)\n"
20044 " movnti %%edx, 12(%3)\n"
20045- "4: movl 16(%4), %%eax\n"
20046- "41: movl 20(%4), %%edx\n"
20047+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20048+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20049 " movnti %%eax, 16(%3)\n"
20050 " movnti %%edx, 20(%3)\n"
20051- "10: movl 24(%4), %%eax\n"
20052- "51: movl 28(%4), %%edx\n"
20053+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20054+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20055 " movnti %%eax, 24(%3)\n"
20056 " movnti %%edx, 28(%3)\n"
20057- "11: movl 32(%4), %%eax\n"
20058- "61: movl 36(%4), %%edx\n"
20059+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20060+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20061 " movnti %%eax, 32(%3)\n"
20062 " movnti %%edx, 36(%3)\n"
20063- "12: movl 40(%4), %%eax\n"
20064- "71: movl 44(%4), %%edx\n"
20065+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20066+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20067 " movnti %%eax, 40(%3)\n"
20068 " movnti %%edx, 44(%3)\n"
20069- "13: movl 48(%4), %%eax\n"
20070- "81: movl 52(%4), %%edx\n"
20071+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20072+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20073 " movnti %%eax, 48(%3)\n"
20074 " movnti %%edx, 52(%3)\n"
20075- "14: movl 56(%4), %%eax\n"
20076- "91: movl 60(%4), %%edx\n"
20077+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20078+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20079 " movnti %%eax, 56(%3)\n"
20080 " movnti %%edx, 60(%3)\n"
20081 " addl $-64, %0\n"
20082@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20083 " shrl $2, %0\n"
20084 " andl $3, %%eax\n"
20085 " cld\n"
20086- "6: rep; movsl\n"
20087+ "6: rep; "__copyuser_seg" movsl\n"
20088 " movl %%eax,%0\n"
20089- "7: rep; movsb\n"
20090+ "7: rep; "__copyuser_seg" movsb\n"
20091 "8:\n"
20092 ".section .fixup,\"ax\"\n"
20093 "9: lea 0(%%eax,%0,4),%0\n"
20094@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20095 */
20096 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20097 unsigned long size);
20098-unsigned long __copy_user_intel(void __user *to, const void *from,
20099+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20100+ unsigned long size);
20101+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20102 unsigned long size);
20103 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20104 const void __user *from, unsigned long size);
20105 #endif /* CONFIG_X86_INTEL_USERCOPY */
20106
20107 /* Generic arbitrary sized copy. */
20108-#define __copy_user(to, from, size) \
20109+#define __copy_user(to, from, size, prefix, set, restore) \
20110 do { \
20111 int __d0, __d1, __d2; \
20112 __asm__ __volatile__( \
20113+ set \
20114 " cmp $7,%0\n" \
20115 " jbe 1f\n" \
20116 " movl %1,%0\n" \
20117 " negl %0\n" \
20118 " andl $7,%0\n" \
20119 " subl %0,%3\n" \
20120- "4: rep; movsb\n" \
20121+ "4: rep; "prefix"movsb\n" \
20122 " movl %3,%0\n" \
20123 " shrl $2,%0\n" \
20124 " andl $3,%3\n" \
20125 " .align 2,0x90\n" \
20126- "0: rep; movsl\n" \
20127+ "0: rep; "prefix"movsl\n" \
20128 " movl %3,%0\n" \
20129- "1: rep; movsb\n" \
20130+ "1: rep; "prefix"movsb\n" \
20131 "2:\n" \
20132+ restore \
20133 ".section .fixup,\"ax\"\n" \
20134 "5: addl %3,%0\n" \
20135 " jmp 2b\n" \
20136@@ -682,14 +799,14 @@ do { \
20137 " negl %0\n" \
20138 " andl $7,%0\n" \
20139 " subl %0,%3\n" \
20140- "4: rep; movsb\n" \
20141+ "4: rep; "__copyuser_seg"movsb\n" \
20142 " movl %3,%0\n" \
20143 " shrl $2,%0\n" \
20144 " andl $3,%3\n" \
20145 " .align 2,0x90\n" \
20146- "0: rep; movsl\n" \
20147+ "0: rep; "__copyuser_seg"movsl\n" \
20148 " movl %3,%0\n" \
20149- "1: rep; movsb\n" \
20150+ "1: rep; "__copyuser_seg"movsb\n" \
20151 "2:\n" \
20152 ".section .fixup,\"ax\"\n" \
20153 "5: addl %3,%0\n" \
20154@@ -775,9 +892,9 @@ survive:
20155 }
20156 #endif
20157 if (movsl_is_ok(to, from, n))
20158- __copy_user(to, from, n);
20159+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20160 else
20161- n = __copy_user_intel(to, from, n);
20162+ n = __generic_copy_to_user_intel(to, from, n);
20163 return n;
20164 }
20165 EXPORT_SYMBOL(__copy_to_user_ll);
20166@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20167 unsigned long n)
20168 {
20169 if (movsl_is_ok(to, from, n))
20170- __copy_user(to, from, n);
20171+ __copy_user(to, from, n, __copyuser_seg, "", "");
20172 else
20173- n = __copy_user_intel((void __user *)to,
20174- (const void *)from, n);
20175+ n = __generic_copy_from_user_intel(to, from, n);
20176 return n;
20177 }
20178 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20179@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20180 if (n > 64 && cpu_has_xmm2)
20181 n = __copy_user_intel_nocache(to, from, n);
20182 else
20183- __copy_user(to, from, n);
20184+ __copy_user(to, from, n, __copyuser_seg, "", "");
20185 #else
20186- __copy_user(to, from, n);
20187+ __copy_user(to, from, n, __copyuser_seg, "", "");
20188 #endif
20189 return n;
20190 }
20191 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20192
20193-/**
20194- * copy_to_user: - Copy a block of data into user space.
20195- * @to: Destination address, in user space.
20196- * @from: Source address, in kernel space.
20197- * @n: Number of bytes to copy.
20198- *
20199- * Context: User context only. This function may sleep.
20200- *
20201- * Copy data from kernel space to user space.
20202- *
20203- * Returns number of bytes that could not be copied.
20204- * On success, this will be zero.
20205- */
20206-unsigned long
20207-copy_to_user(void __user *to, const void *from, unsigned long n)
20208+#ifdef CONFIG_PAX_MEMORY_UDEREF
20209+void __set_fs(mm_segment_t x)
20210 {
20211- if (access_ok(VERIFY_WRITE, to, n))
20212- n = __copy_to_user(to, from, n);
20213- return n;
20214+ switch (x.seg) {
20215+ case 0:
20216+ loadsegment(gs, 0);
20217+ break;
20218+ case TASK_SIZE_MAX:
20219+ loadsegment(gs, __USER_DS);
20220+ break;
20221+ case -1UL:
20222+ loadsegment(gs, __KERNEL_DS);
20223+ break;
20224+ default:
20225+ BUG();
20226+ }
20227+ return;
20228 }
20229-EXPORT_SYMBOL(copy_to_user);
20230+EXPORT_SYMBOL(__set_fs);
20231
20232-/**
20233- * copy_from_user: - Copy a block of data from user space.
20234- * @to: Destination address, in kernel space.
20235- * @from: Source address, in user space.
20236- * @n: Number of bytes to copy.
20237- *
20238- * Context: User context only. This function may sleep.
20239- *
20240- * Copy data from user space to kernel space.
20241- *
20242- * Returns number of bytes that could not be copied.
20243- * On success, this will be zero.
20244- *
20245- * If some data could not be copied, this function will pad the copied
20246- * data to the requested size using zero bytes.
20247- */
20248-unsigned long
20249-copy_from_user(void *to, const void __user *from, unsigned long n)
20250+void set_fs(mm_segment_t x)
20251 {
20252- if (access_ok(VERIFY_READ, from, n))
20253- n = __copy_from_user(to, from, n);
20254- else
20255- memset(to, 0, n);
20256- return n;
20257+ current_thread_info()->addr_limit = x;
20258+ __set_fs(x);
20259 }
20260-EXPORT_SYMBOL(copy_from_user);
20261+EXPORT_SYMBOL(set_fs);
20262+#endif
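The usercopy_32.c changes above hinge on compile-time string splicing: __copy_user() now takes a segment prefix plus set/restore fragments, and the Intel variants get "__copyuser_seg" pasted in front of each userland access, so the same copy loops can run either through the default %ds or through the %gs-based userland segment used by UDEREF (with __set_fs()/set_fs() switching %gs between 0, __USER_DS and __KERNEL_DS). The following stand-alone program only illustrates the splicing mechanism itself, with made-up template strings.

#include <stdio.h>

/* stand-ins for the real __copyuser_seg definition, which lives elsewhere in the patch */
#define SEG_PLAIN ""
#define SEG_GS    "gs;"

/* adjacent string literals concatenate, so the prefix lands inside the asm template */
#define COPY_TEMPLATE(prefix) \
	"0: rep; " prefix "movsl\n" \
	"   movl %3,%0\n" \
	"1: rep; " prefix "movsb\n"

int main(void)
{
	puts("plain-%ds template:\n" COPY_TEMPLATE(SEG_PLAIN));
	puts("%gs-override template:\n" COPY_TEMPLATE(SEG_GS));
	return 0;
}

In the kernel code the resulting string is the template handed to __asm__ __volatile__, so choosing the prefix at each call site costs nothing at run time.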
20263diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_64.c linux-2.6.32.42/arch/x86/lib/usercopy_64.c
20264--- linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20265+++ linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20266@@ -42,6 +42,12 @@ long
20267 __strncpy_from_user(char *dst, const char __user *src, long count)
20268 {
20269 long res;
20270+
20271+#ifdef CONFIG_PAX_MEMORY_UDEREF
20272+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20273+ src += PAX_USER_SHADOW_BASE;
20274+#endif
20275+
20276 __do_strncpy_from_user(dst, src, count, res);
20277 return res;
20278 }
20279@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20280 {
20281 long __d0;
20282 might_fault();
20283+
20284+#ifdef CONFIG_PAX_MEMORY_UDEREF
20285+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20286+ addr += PAX_USER_SHADOW_BASE;
20287+#endif
20288+
20289 /* no memory constraint because it doesn't change any memory gcc knows
20290 about */
20291 asm volatile(
20292@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20293
20294 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20295 {
20296- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20297+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20298+
20299+#ifdef CONFIG_PAX_MEMORY_UDEREF
20300+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20301+ to += PAX_USER_SHADOW_BASE;
20302+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20303+ from += PAX_USER_SHADOW_BASE;
20304+#endif
20305+
20306 return copy_user_generic((__force void *)to, (__force void *)from, len);
20307- }
20308- return len;
20309+ }
20310+ return len;
20311 }
20312 EXPORT_SYMBOL(copy_in_user);
20313
20314diff -urNp linux-2.6.32.42/arch/x86/Makefile linux-2.6.32.42/arch/x86/Makefile
20315--- linux-2.6.32.42/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20316+++ linux-2.6.32.42/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
20317@@ -189,3 +189,12 @@ define archhelp
20318 echo ' FDARGS="..." arguments for the booted kernel'
20319 echo ' FDINITRD=file initrd for the booted kernel'
20320 endef
20321+
20322+define OLD_LD
20323+
20324+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20325+*** Please upgrade your binutils to 2.18 or newer
20326+endef
20327+
20328+archprepare:
20329+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20330diff -urNp linux-2.6.32.42/arch/x86/mm/extable.c linux-2.6.32.42/arch/x86/mm/extable.c
20331--- linux-2.6.32.42/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20332+++ linux-2.6.32.42/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20333@@ -1,14 +1,71 @@
20334 #include <linux/module.h>
20335 #include <linux/spinlock.h>
20336+#include <linux/sort.h>
20337 #include <asm/uaccess.h>
20338+#include <asm/pgtable.h>
20339
20340+/*
20341+ * The exception table needs to be sorted so that the binary
20342+ * search that we use to find entries in it works properly.
20343+ * This is used both for the kernel exception table and for
20344+ * the exception tables of modules that get loaded.
20345+ */
20346+static int cmp_ex(const void *a, const void *b)
20347+{
20348+ const struct exception_table_entry *x = a, *y = b;
20349+
20350+ /* avoid overflow */
20351+ if (x->insn > y->insn)
20352+ return 1;
20353+ if (x->insn < y->insn)
20354+ return -1;
20355+ return 0;
20356+}
20357+
20358+static void swap_ex(void *a, void *b, int size)
20359+{
20360+ struct exception_table_entry t, *x = a, *y = b;
20361+
20362+ t = *x;
20363+
20364+ pax_open_kernel();
20365+ *x = *y;
20366+ *y = t;
20367+ pax_close_kernel();
20368+}
20369+
20370+void sort_extable(struct exception_table_entry *start,
20371+ struct exception_table_entry *finish)
20372+{
20373+ sort(start, finish - start, sizeof(struct exception_table_entry),
20374+ cmp_ex, swap_ex);
20375+}
20376+
20377+#ifdef CONFIG_MODULES
20378+/*
20379+ * If the exception table is sorted, any referring to the module init
20380+ * will be at the beginning or the end.
20381+ */
20382+void trim_init_extable(struct module *m)
20383+{
20384+ /*trim the beginning*/
20385+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20386+ m->extable++;
20387+ m->num_exentries--;
20388+ }
20389+ /*trim the end*/
20390+ while (m->num_exentries &&
20391+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20392+ m->num_exentries--;
20393+}
20394+#endif /* CONFIG_MODULES */
20395
20396 int fixup_exception(struct pt_regs *regs)
20397 {
20398 const struct exception_table_entry *fixup;
20399
20400 #ifdef CONFIG_PNPBIOS
20401- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20402+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20403 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20404 extern u32 pnp_bios_is_utter_crap;
20405 pnp_bios_is_utter_crap = 1;
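The extable.c addition above supplies a sort_extable() that copes with a write-protected exception table: entries are ordered by the address of the faulting instruction using an overflow-safe comparator, and swap_ex() briefly opens the kernel (pax_open_kernel/pax_close_kernel) around each swap. Below is a user-space sketch of the same ordering logic, with qsort() standing in for the kernel's sort() and obviously made-up example addresses.

#include <stdio.h>
#include <stdlib.h>

struct ex_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address of the recovery code */
};

static int cmp_ex(const void *a, const void *b)
{
	const struct ex_entry *x = a, *y = b;

	/* compare instead of subtracting: a subtraction could overflow the int result */
	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct ex_entry table[] = {
		{ 0xc0103000UL, 0xc0200010UL },
		{ 0xc0101000UL, 0xc0200000UL },
		{ 0xc0102000UL, 0xc0200008UL },
	};
	size_t i, n = sizeof(table) / sizeof(table[0]);

	qsort(table, n, sizeof(table[0]), cmp_ex);
	for (i = 0; i < n; i++)
		printf("%#lx -> %#lx\n", table[i].insn, table[i].fixup);
	return 0;
}

Keeping the table sorted is what allows fixup_exception()'s binary search, and trim_init_extable() relies on the same ordering to drop module-init entries from either end.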
20406diff -urNp linux-2.6.32.42/arch/x86/mm/fault.c linux-2.6.32.42/arch/x86/mm/fault.c
20407--- linux-2.6.32.42/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20408+++ linux-2.6.32.42/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20409@@ -11,10 +11,19 @@
20410 #include <linux/kprobes.h> /* __kprobes, ... */
20411 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20412 #include <linux/perf_event.h> /* perf_sw_event */
20413+#include <linux/unistd.h>
20414+#include <linux/compiler.h>
20415
20416 #include <asm/traps.h> /* dotraplinkage, ... */
20417 #include <asm/pgalloc.h> /* pgd_*(), ... */
20418 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20419+#include <asm/vsyscall.h>
20420+#include <asm/tlbflush.h>
20421+
20422+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20423+#include <asm/stacktrace.h>
20424+#include "../kernel/dumpstack.h"
20425+#endif
20426
20427 /*
20428 * Page fault error code bits:
20429@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20430 int ret = 0;
20431
20432 /* kprobe_running() needs smp_processor_id() */
20433- if (kprobes_built_in() && !user_mode_vm(regs)) {
20434+ if (kprobes_built_in() && !user_mode(regs)) {
20435 preempt_disable();
20436 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20437 ret = 1;
20438@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20439 return !instr_lo || (instr_lo>>1) == 1;
20440 case 0x00:
20441 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20442- if (probe_kernel_address(instr, opcode))
20443+ if (user_mode(regs)) {
20444+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20445+ return 0;
20446+ } else if (probe_kernel_address(instr, opcode))
20447 return 0;
20448
20449 *prefetch = (instr_lo == 0xF) &&
20450@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20451 while (instr < max_instr) {
20452 unsigned char opcode;
20453
20454- if (probe_kernel_address(instr, opcode))
20455+ if (user_mode(regs)) {
20456+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20457+ break;
20458+ } else if (probe_kernel_address(instr, opcode))
20459 break;
20460
20461 instr++;
20462@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20463 force_sig_info(si_signo, &info, tsk);
20464 }
20465
20466+#ifdef CONFIG_PAX_EMUTRAMP
20467+static int pax_handle_fetch_fault(struct pt_regs *regs);
20468+#endif
20469+
20470+#ifdef CONFIG_PAX_PAGEEXEC
20471+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20472+{
20473+ pgd_t *pgd;
20474+ pud_t *pud;
20475+ pmd_t *pmd;
20476+
20477+ pgd = pgd_offset(mm, address);
20478+ if (!pgd_present(*pgd))
20479+ return NULL;
20480+ pud = pud_offset(pgd, address);
20481+ if (!pud_present(*pud))
20482+ return NULL;
20483+ pmd = pmd_offset(pud, address);
20484+ if (!pmd_present(*pmd))
20485+ return NULL;
20486+ return pmd;
20487+}
20488+#endif
20489+
20490 DEFINE_SPINLOCK(pgd_lock);
20491 LIST_HEAD(pgd_list);
20492
20493@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20494 address += PMD_SIZE) {
20495
20496 unsigned long flags;
20497+
20498+#ifdef CONFIG_PAX_PER_CPU_PGD
20499+ unsigned long cpu;
20500+#else
20501 struct page *page;
20502+#endif
20503
20504 spin_lock_irqsave(&pgd_lock, flags);
20505+
20506+#ifdef CONFIG_PAX_PER_CPU_PGD
20507+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20508+ pgd_t *pgd = get_cpu_pgd(cpu);
20509+#else
20510 list_for_each_entry(page, &pgd_list, lru) {
20511- if (!vmalloc_sync_one(page_address(page), address))
20512+ pgd_t *pgd = page_address(page);
20513+#endif
20514+
20515+ if (!vmalloc_sync_one(pgd, address))
20516 break;
20517 }
20518 spin_unlock_irqrestore(&pgd_lock, flags);
20519@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20520 * an interrupt in the middle of a task switch..
20521 */
20522 pgd_paddr = read_cr3();
20523+
20524+#ifdef CONFIG_PAX_PER_CPU_PGD
20525+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20526+#endif
20527+
20528 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20529 if (!pmd_k)
20530 return -1;
20531@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20532
20533 const pgd_t *pgd_ref = pgd_offset_k(address);
20534 unsigned long flags;
20535+
20536+#ifdef CONFIG_PAX_PER_CPU_PGD
20537+ unsigned long cpu;
20538+#else
20539 struct page *page;
20540+#endif
20541
20542 if (pgd_none(*pgd_ref))
20543 continue;
20544
20545 spin_lock_irqsave(&pgd_lock, flags);
20546+
20547+#ifdef CONFIG_PAX_PER_CPU_PGD
20548+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20549+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20550+#else
20551 list_for_each_entry(page, &pgd_list, lru) {
20552 pgd_t *pgd;
20553 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20554+#endif
20555+
20556 if (pgd_none(*pgd))
20557 set_pgd(pgd, *pgd_ref);
20558 else
20559@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20560 * happen within a race in page table update. In the later
20561 * case just flush:
20562 */
20563+
20564+#ifdef CONFIG_PAX_PER_CPU_PGD
20565+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20566+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20567+#else
20568 pgd = pgd_offset(current->active_mm, address);
20569+#endif
20570+
20571 pgd_ref = pgd_offset_k(address);
20572 if (pgd_none(*pgd_ref))
20573 return -1;
20574@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20575 static int is_errata100(struct pt_regs *regs, unsigned long address)
20576 {
20577 #ifdef CONFIG_X86_64
20578- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20579+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20580 return 1;
20581 #endif
20582 return 0;
20583@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20584 }
20585
20586 static const char nx_warning[] = KERN_CRIT
20587-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20588+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20589
20590 static void
20591 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20592@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20593 if (!oops_may_print())
20594 return;
20595
20596- if (error_code & PF_INSTR) {
20597+ if (nx_enabled && (error_code & PF_INSTR)) {
20598 unsigned int level;
20599
20600 pte_t *pte = lookup_address(address, &level);
20601
20602 if (pte && pte_present(*pte) && !pte_exec(*pte))
20603- printk(nx_warning, current_uid());
20604+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20605 }
20606
20607+#ifdef CONFIG_PAX_KERNEXEC
20608+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20609+ if (current->signal->curr_ip)
20610+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20611+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20612+ else
20613+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20614+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20615+ }
20616+#endif
20617+
20618 printk(KERN_ALERT "BUG: unable to handle kernel ");
20619 if (address < PAGE_SIZE)
20620 printk(KERN_CONT "NULL pointer dereference");
20621@@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20622 unsigned long address, int si_code)
20623 {
20624 struct task_struct *tsk = current;
20625+ struct mm_struct *mm = tsk->mm;
20626+
20627+#ifdef CONFIG_X86_64
20628+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20629+ if (regs->ip == (unsigned long)vgettimeofday) {
20630+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20631+ return;
20632+ } else if (regs->ip == (unsigned long)vtime) {
20633+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20634+ return;
20635+ } else if (regs->ip == (unsigned long)vgetcpu) {
20636+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20637+ return;
20638+ }
20639+ }
20640+#endif
20641+
20642+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20643+ if (mm && (error_code & PF_USER)) {
20644+ unsigned long ip = regs->ip;
20645+
20646+ if (v8086_mode(regs))
20647+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20648+
20649+ /*
20650+ * It's possible to have interrupts off here:
20651+ */
20652+ local_irq_enable();
20653+
20654+#ifdef CONFIG_PAX_PAGEEXEC
20655+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20656+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20657+
20658+#ifdef CONFIG_PAX_EMUTRAMP
20659+ switch (pax_handle_fetch_fault(regs)) {
20660+ case 2:
20661+ return;
20662+ }
20663+#endif
20664+
20665+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20666+ do_group_exit(SIGKILL);
20667+ }
20668+#endif
20669+
20670+#ifdef CONFIG_PAX_SEGMEXEC
20671+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20672+
20673+#ifdef CONFIG_PAX_EMUTRAMP
20674+ switch (pax_handle_fetch_fault(regs)) {
20675+ case 2:
20676+ return;
20677+ }
20678+#endif
20679+
20680+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20681+ do_group_exit(SIGKILL);
20682+ }
20683+#endif
20684+
20685+ }
20686+#endif
20687
20688 /* User mode accesses just cause a SIGSEGV */
20689 if (error_code & PF_USER) {
20690@@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20691 return 1;
20692 }
20693
20694+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20695+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20696+{
20697+ pte_t *pte;
20698+ pmd_t *pmd;
20699+ spinlock_t *ptl;
20700+ unsigned char pte_mask;
20701+
20702+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20703+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20704+ return 0;
20705+
20706+ /* PaX: it's our fault, let's handle it if we can */
20707+
20708+ /* PaX: take a look at read faults before acquiring any locks */
20709+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20710+ /* instruction fetch attempt from a protected page in user mode */
20711+ up_read(&mm->mmap_sem);
20712+
20713+#ifdef CONFIG_PAX_EMUTRAMP
20714+ switch (pax_handle_fetch_fault(regs)) {
20715+ case 2:
20716+ return 1;
20717+ }
20718+#endif
20719+
20720+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20721+ do_group_exit(SIGKILL);
20722+ }
20723+
20724+ pmd = pax_get_pmd(mm, address);
20725+ if (unlikely(!pmd))
20726+ return 0;
20727+
20728+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20729+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20730+ pte_unmap_unlock(pte, ptl);
20731+ return 0;
20732+ }
20733+
20734+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20735+ /* write attempt to a protected page in user mode */
20736+ pte_unmap_unlock(pte, ptl);
20737+ return 0;
20738+ }
20739+
20740+#ifdef CONFIG_SMP
20741+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20742+#else
20743+ if (likely(address > get_limit(regs->cs)))
20744+#endif
20745+ {
20746+ set_pte(pte, pte_mkread(*pte));
20747+ __flush_tlb_one(address);
20748+ pte_unmap_unlock(pte, ptl);
20749+ up_read(&mm->mmap_sem);
20750+ return 1;
20751+ }
20752+
20753+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20754+
20755+ /*
20756+ * PaX: fill DTLB with user rights and retry
20757+ */
20758+ __asm__ __volatile__ (
20759+ "orb %2,(%1)\n"
20760+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20761+/*
20762+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20763+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20764+ * page fault when examined during a TLB load attempt. this is true not only
20765+ * for PTEs holding a non-present entry but also present entries that will
20766+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20767+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20768+ * for our target pages since their PTEs are simply not in the TLBs at all.
20769+
20770+ * the best thing in omitting it is that we gain around 15-20% speed in the
20771+ * fast path of the page fault handler and can get rid of tracing since we
20772+ * can no longer flush unintended entries.
20773+ */
20774+ "invlpg (%0)\n"
20775+#endif
20776+ __copyuser_seg"testb $0,(%0)\n"
20777+ "xorb %3,(%1)\n"
20778+ :
20779+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20780+ : "memory", "cc");
20781+ pte_unmap_unlock(pte, ptl);
20782+ up_read(&mm->mmap_sem);
20783+ return 1;
20784+}
20785+#endif
20786+
20787 /*
20788 * Handle a spurious fault caused by a stale TLB entry.
20789 *
20790@@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20791 static inline int
20792 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20793 {
20794+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20795+ return 1;
20796+
20797 if (write) {
20798 /* write, present and write, not present: */
20799 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20800@@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20801 {
20802 struct vm_area_struct *vma;
20803 struct task_struct *tsk;
20804- unsigned long address;
20805 struct mm_struct *mm;
20806 int write;
20807 int fault;
20808
20809+ /* Get the faulting address: */
20810+ unsigned long address = read_cr2();
20811+
20812+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20813+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20814+ if (!search_exception_tables(regs->ip)) {
20815+ bad_area_nosemaphore(regs, error_code, address);
20816+ return;
20817+ }
20818+ if (address < PAX_USER_SHADOW_BASE) {
20819+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20820+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20821+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20822+ } else
20823+ address -= PAX_USER_SHADOW_BASE;
20824+ }
20825+#endif
20826+
20827 tsk = current;
20828 mm = tsk->mm;
20829
20830- /* Get the faulting address: */
20831- address = read_cr2();
20832-
20833 /*
20834 * Detect and handle instructions that would cause a page fault for
20835 * both a tracked kernel page and a userspace page.
20836@@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20837 * User-mode registers count as a user access even for any
20838 * potential system fault or CPU buglet:
20839 */
20840- if (user_mode_vm(regs)) {
20841+ if (user_mode(regs)) {
20842 local_irq_enable();
20843 error_code |= PF_USER;
20844 } else {
20845@@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20846 might_sleep();
20847 }
20848
20849+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20850+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20851+ return;
20852+#endif
20853+
20854 vma = find_vma(mm, address);
20855 if (unlikely(!vma)) {
20856 bad_area(regs, error_code, address);
20857@@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20858 bad_area(regs, error_code, address);
20859 return;
20860 }
20861- if (error_code & PF_USER) {
20862- /*
20863- * Accessing the stack below %sp is always a bug.
20864- * The large cushion allows instructions like enter
20865- * and pusha to work. ("enter $65535, $31" pushes
20866- * 32 pointers and then decrements %sp by 65535.)
20867- */
20868- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20869- bad_area(regs, error_code, address);
20870- return;
20871- }
20872+ /*
20873+ * Accessing the stack below %sp is always a bug.
20874+ * The large cushion allows instructions like enter
20875+ * and pusha to work. ("enter $65535, $31" pushes
20876+ * 32 pointers and then decrements %sp by 65535.)
20877+ */
20878+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20879+ bad_area(regs, error_code, address);
20880+ return;
20881+ }
20882+
20883+#ifdef CONFIG_PAX_SEGMEXEC
20884+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20885+ bad_area(regs, error_code, address);
20886+ return;
20887 }
20888+#endif
20889+
20890 if (unlikely(expand_stack(vma, address))) {
20891 bad_area(regs, error_code, address);
20892 return;
20893@@ -1146,3 +1416,199 @@ good_area:
20894
20895 up_read(&mm->mmap_sem);
20896 }
20897+
20898+#ifdef CONFIG_PAX_EMUTRAMP
20899+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20900+{
20901+ int err;
20902+
20903+ do { /* PaX: gcc trampoline emulation #1 */
20904+ unsigned char mov1, mov2;
20905+ unsigned short jmp;
20906+ unsigned int addr1, addr2;
20907+
20908+#ifdef CONFIG_X86_64
20909+ if ((regs->ip + 11) >> 32)
20910+ break;
20911+#endif
20912+
20913+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20914+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20915+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20916+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20917+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20918+
20919+ if (err)
20920+ break;
20921+
20922+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20923+ regs->cx = addr1;
20924+ regs->ax = addr2;
20925+ regs->ip = addr2;
20926+ return 2;
20927+ }
20928+ } while (0);
20929+
20930+ do { /* PaX: gcc trampoline emulation #2 */
20931+ unsigned char mov, jmp;
20932+ unsigned int addr1, addr2;
20933+
20934+#ifdef CONFIG_X86_64
20935+ if ((regs->ip + 9) >> 32)
20936+ break;
20937+#endif
20938+
20939+ err = get_user(mov, (unsigned char __user *)regs->ip);
20940+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20941+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20942+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20943+
20944+ if (err)
20945+ break;
20946+
20947+ if (mov == 0xB9 && jmp == 0xE9) {
20948+ regs->cx = addr1;
20949+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20950+ return 2;
20951+ }
20952+ } while (0);
20953+
20954+ return 1; /* PaX in action */
20955+}
20956+
20957+#ifdef CONFIG_X86_64
20958+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20959+{
20960+ int err;
20961+
20962+ do { /* PaX: gcc trampoline emulation #1 */
20963+ unsigned short mov1, mov2, jmp1;
20964+ unsigned char jmp2;
20965+ unsigned int addr1;
20966+ unsigned long addr2;
20967+
20968+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20969+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20970+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20971+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20972+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20973+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20974+
20975+ if (err)
20976+ break;
20977+
20978+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20979+ regs->r11 = addr1;
20980+ regs->r10 = addr2;
20981+ regs->ip = addr1;
20982+ return 2;
20983+ }
20984+ } while (0);
20985+
20986+ do { /* PaX: gcc trampoline emulation #2 */
20987+ unsigned short mov1, mov2, jmp1;
20988+ unsigned char jmp2;
20989+ unsigned long addr1, addr2;
20990+
20991+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20992+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20993+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20994+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20995+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20996+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20997+
20998+ if (err)
20999+ break;
21000+
21001+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21002+ regs->r11 = addr1;
21003+ regs->r10 = addr2;
21004+ regs->ip = addr1;
21005+ return 2;
21006+ }
21007+ } while (0);
21008+
21009+ return 1; /* PaX in action */
21010+}
21011+#endif
21012+
21013+/*
21014+ * PaX: decide what to do with offenders (regs->ip = fault address)
21015+ *
21016+ * returns 1 when task should be killed
21017+ * 2 when gcc trampoline was detected
21018+ */
21019+static int pax_handle_fetch_fault(struct pt_regs *regs)
21020+{
21021+ if (v8086_mode(regs))
21022+ return 1;
21023+
21024+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21025+ return 1;
21026+
21027+#ifdef CONFIG_X86_32
21028+ return pax_handle_fetch_fault_32(regs);
21029+#else
21030+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21031+ return pax_handle_fetch_fault_32(regs);
21032+ else
21033+ return pax_handle_fetch_fault_64(regs);
21034+#endif
21035+}
21036+#endif
21037+
21038+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21039+void pax_report_insns(void *pc, void *sp)
21040+{
21041+ long i;
21042+
21043+ printk(KERN_ERR "PAX: bytes at PC: ");
21044+ for (i = 0; i < 20; i++) {
21045+ unsigned char c;
21046+ if (get_user(c, (__force unsigned char __user *)pc+i))
21047+ printk(KERN_CONT "?? ");
21048+ else
21049+ printk(KERN_CONT "%02x ", c);
21050+ }
21051+ printk("\n");
21052+
21053+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21054+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21055+ unsigned long c;
21056+ if (get_user(c, (__force unsigned long __user *)sp+i))
21057+#ifdef CONFIG_X86_32
21058+ printk(KERN_CONT "???????? ");
21059+#else
21060+ printk(KERN_CONT "???????????????? ");
21061+#endif
21062+ else
21063+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21064+ }
21065+ printk("\n");
21066+}
21067+#endif
21068+
21069+/**
21070+ * probe_kernel_write(): safely attempt to write to a location
21071+ * @dst: address to write to
21072+ * @src: pointer to the data that shall be written
21073+ * @size: size of the data chunk
21074+ *
21075+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21076+ * happens, handle that and return -EFAULT.
21077+ */
21078+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21079+{
21080+ long ret;
21081+ mm_segment_t old_fs = get_fs();
21082+
21083+ set_fs(KERNEL_DS);
21084+ pagefault_disable();
21085+ pax_open_kernel();
21086+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21087+ pax_close_kernel();
21088+ pagefault_enable();
21089+ set_fs(old_fs);
21090+
21091+ return ret ? -EFAULT : 0;
21092+}
21093diff -urNp linux-2.6.32.42/arch/x86/mm/gup.c linux-2.6.32.42/arch/x86/mm/gup.c
21094--- linux-2.6.32.42/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21095+++ linux-2.6.32.42/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21096@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21097 addr = start;
21098 len = (unsigned long) nr_pages << PAGE_SHIFT;
21099 end = start + len;
21100- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21101+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21102 (void __user *)start, len)))
21103 return 0;
21104
21105diff -urNp linux-2.6.32.42/arch/x86/mm/highmem_32.c linux-2.6.32.42/arch/x86/mm/highmem_32.c
21106--- linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21107+++ linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21108@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21109 idx = type + KM_TYPE_NR*smp_processor_id();
21110 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21111 BUG_ON(!pte_none(*(kmap_pte-idx)));
21112+
21113+ pax_open_kernel();
21114 set_pte(kmap_pte-idx, mk_pte(page, prot));
21115+ pax_close_kernel();
21116
21117 return (void *)vaddr;
21118 }
21119diff -urNp linux-2.6.32.42/arch/x86/mm/hugetlbpage.c linux-2.6.32.42/arch/x86/mm/hugetlbpage.c
21120--- linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21121+++ linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21122@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21123 struct hstate *h = hstate_file(file);
21124 struct mm_struct *mm = current->mm;
21125 struct vm_area_struct *vma;
21126- unsigned long start_addr;
21127+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21128+
21129+#ifdef CONFIG_PAX_SEGMEXEC
21130+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21131+ pax_task_size = SEGMEXEC_TASK_SIZE;
21132+#endif
21133+
21134+ pax_task_size -= PAGE_SIZE;
21135
21136 if (len > mm->cached_hole_size) {
21137- start_addr = mm->free_area_cache;
21138+ start_addr = mm->free_area_cache;
21139 } else {
21140- start_addr = TASK_UNMAPPED_BASE;
21141- mm->cached_hole_size = 0;
21142+ start_addr = mm->mmap_base;
21143+ mm->cached_hole_size = 0;
21144 }
21145
21146 full_search:
21147@@ -281,26 +288,27 @@ full_search:
21148
21149 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21150 /* At this point: (!vma || addr < vma->vm_end). */
21151- if (TASK_SIZE - len < addr) {
21152+ if (pax_task_size - len < addr) {
21153 /*
21154 * Start a new search - just in case we missed
21155 * some holes.
21156 */
21157- if (start_addr != TASK_UNMAPPED_BASE) {
21158- start_addr = TASK_UNMAPPED_BASE;
21159+ if (start_addr != mm->mmap_base) {
21160+ start_addr = mm->mmap_base;
21161 mm->cached_hole_size = 0;
21162 goto full_search;
21163 }
21164 return -ENOMEM;
21165 }
21166- if (!vma || addr + len <= vma->vm_start) {
21167- mm->free_area_cache = addr + len;
21168- return addr;
21169- }
21170+ if (check_heap_stack_gap(vma, addr, len))
21171+ break;
21172 if (addr + mm->cached_hole_size < vma->vm_start)
21173 mm->cached_hole_size = vma->vm_start - addr;
21174 addr = ALIGN(vma->vm_end, huge_page_size(h));
21175 }
21176+
21177+ mm->free_area_cache = addr + len;
21178+ return addr;
21179 }
21180
21181 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21182@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21183 {
21184 struct hstate *h = hstate_file(file);
21185 struct mm_struct *mm = current->mm;
21186- struct vm_area_struct *vma, *prev_vma;
21187- unsigned long base = mm->mmap_base, addr = addr0;
21188+ struct vm_area_struct *vma;
21189+ unsigned long base = mm->mmap_base, addr;
21190 unsigned long largest_hole = mm->cached_hole_size;
21191- int first_time = 1;
21192
21193 /* don't allow allocations above current base */
21194 if (mm->free_area_cache > base)
21195@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21196 largest_hole = 0;
21197 mm->free_area_cache = base;
21198 }
21199-try_again:
21200+
21201 /* make sure it can fit in the remaining address space */
21202 if (mm->free_area_cache < len)
21203 goto fail;
21204
21205 /* either no address requested or cant fit in requested address hole */
21206- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21207+ addr = (mm->free_area_cache - len);
21208 do {
21209+ addr &= huge_page_mask(h);
21210+ vma = find_vma(mm, addr);
21211 /*
21212 * Lookup failure means no vma is above this address,
21213 * i.e. return with success:
21214- */
21215- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21216- return addr;
21217-
21218- /*
21219 * new region fits between prev_vma->vm_end and
21220 * vma->vm_start, use it:
21221 */
21222- if (addr + len <= vma->vm_start &&
21223- (!prev_vma || (addr >= prev_vma->vm_end))) {
21224+ if (check_heap_stack_gap(vma, addr, len)) {
21225 /* remember the address as a hint for next time */
21226- mm->cached_hole_size = largest_hole;
21227- return (mm->free_area_cache = addr);
21228- } else {
21229- /* pull free_area_cache down to the first hole */
21230- if (mm->free_area_cache == vma->vm_end) {
21231- mm->free_area_cache = vma->vm_start;
21232- mm->cached_hole_size = largest_hole;
21233- }
21234+ mm->cached_hole_size = largest_hole;
21235+ return (mm->free_area_cache = addr);
21236+ }
21237+ /* pull free_area_cache down to the first hole */
21238+ if (mm->free_area_cache == vma->vm_end) {
21239+ mm->free_area_cache = vma->vm_start;
21240+ mm->cached_hole_size = largest_hole;
21241 }
21242
21243 /* remember the largest hole we saw so far */
21244 if (addr + largest_hole < vma->vm_start)
21245- largest_hole = vma->vm_start - addr;
21246+ largest_hole = vma->vm_start - addr;
21247
21248 /* try just below the current vma->vm_start */
21249- addr = (vma->vm_start - len) & huge_page_mask(h);
21250- } while (len <= vma->vm_start);
21251+ addr = skip_heap_stack_gap(vma, len);
21252+ } while (!IS_ERR_VALUE(addr));
21253
21254 fail:
21255 /*
21256- * if hint left us with no space for the requested
21257- * mapping then try again:
21258- */
21259- if (first_time) {
21260- mm->free_area_cache = base;
21261- largest_hole = 0;
21262- first_time = 0;
21263- goto try_again;
21264- }
21265- /*
21266 * A failed mmap() very likely causes application failure,
21267 * so fall back to the bottom-up function here. This scenario
21268 * can happen with large stack limits and large mmap()
21269 * allocations.
21270 */
21271- mm->free_area_cache = TASK_UNMAPPED_BASE;
21272+
21273+#ifdef CONFIG_PAX_SEGMEXEC
21274+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21275+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21276+ else
21277+#endif
21278+
21279+ mm->mmap_base = TASK_UNMAPPED_BASE;
21280+
21281+#ifdef CONFIG_PAX_RANDMMAP
21282+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21283+ mm->mmap_base += mm->delta_mmap;
21284+#endif
21285+
21286+ mm->free_area_cache = mm->mmap_base;
21287 mm->cached_hole_size = ~0UL;
21288 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21289 len, pgoff, flags);
21290@@ -387,6 +393,7 @@ fail:
21291 /*
21292 * Restore the topdown base:
21293 */
21294+ mm->mmap_base = base;
21295 mm->free_area_cache = base;
21296 mm->cached_hole_size = ~0UL;
21297
21298@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21299 struct hstate *h = hstate_file(file);
21300 struct mm_struct *mm = current->mm;
21301 struct vm_area_struct *vma;
21302+ unsigned long pax_task_size = TASK_SIZE;
21303
21304 if (len & ~huge_page_mask(h))
21305 return -EINVAL;
21306- if (len > TASK_SIZE)
21307+
21308+#ifdef CONFIG_PAX_SEGMEXEC
21309+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21310+ pax_task_size = SEGMEXEC_TASK_SIZE;
21311+#endif
21312+
21313+ pax_task_size -= PAGE_SIZE;
21314+
21315+ if (len > pax_task_size)
21316 return -ENOMEM;
21317
21318 if (flags & MAP_FIXED) {
21319@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21320 if (addr) {
21321 addr = ALIGN(addr, huge_page_size(h));
21322 vma = find_vma(mm, addr);
21323- if (TASK_SIZE - len >= addr &&
21324- (!vma || addr + len <= vma->vm_start))
21325+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21326 return addr;
21327 }
21328 if (mm->get_unmapped_area == arch_get_unmapped_area)
21329diff -urNp linux-2.6.32.42/arch/x86/mm/init_32.c linux-2.6.32.42/arch/x86/mm/init_32.c
21330--- linux-2.6.32.42/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21331+++ linux-2.6.32.42/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21332@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21333 }
21334
21335 /*
21336- * Creates a middle page table and puts a pointer to it in the
21337- * given global directory entry. This only returns the gd entry
21338- * in non-PAE compilation mode, since the middle layer is folded.
21339- */
21340-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21341-{
21342- pud_t *pud;
21343- pmd_t *pmd_table;
21344-
21345-#ifdef CONFIG_X86_PAE
21346- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21347- if (after_bootmem)
21348- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21349- else
21350- pmd_table = (pmd_t *)alloc_low_page();
21351- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21352- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21353- pud = pud_offset(pgd, 0);
21354- BUG_ON(pmd_table != pmd_offset(pud, 0));
21355-
21356- return pmd_table;
21357- }
21358-#endif
21359- pud = pud_offset(pgd, 0);
21360- pmd_table = pmd_offset(pud, 0);
21361-
21362- return pmd_table;
21363-}
21364-
21365-/*
21366 * Create a page table and place a pointer to it in a middle page
21367 * directory entry:
21368 */
21369@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21370 page_table = (pte_t *)alloc_low_page();
21371
21372 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21373+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21374+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21375+#else
21376 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21377+#endif
21378 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21379 }
21380
21381 return pte_offset_kernel(pmd, 0);
21382 }
21383
21384+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21385+{
21386+ pud_t *pud;
21387+ pmd_t *pmd_table;
21388+
21389+ pud = pud_offset(pgd, 0);
21390+ pmd_table = pmd_offset(pud, 0);
21391+
21392+ return pmd_table;
21393+}
21394+
21395 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21396 {
21397 int pgd_idx = pgd_index(vaddr);
21398@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21399 int pgd_idx, pmd_idx;
21400 unsigned long vaddr;
21401 pgd_t *pgd;
21402+ pud_t *pud;
21403 pmd_t *pmd;
21404 pte_t *pte = NULL;
21405
21406@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21407 pgd = pgd_base + pgd_idx;
21408
21409 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21410- pmd = one_md_table_init(pgd);
21411- pmd = pmd + pmd_index(vaddr);
21412+ pud = pud_offset(pgd, vaddr);
21413+ pmd = pmd_offset(pud, vaddr);
21414+
21415+#ifdef CONFIG_X86_PAE
21416+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21417+#endif
21418+
21419 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21420 pmd++, pmd_idx++) {
21421 pte = page_table_kmap_check(one_page_table_init(pmd),
21422@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21423 }
21424 }
21425
21426-static inline int is_kernel_text(unsigned long addr)
21427+static inline int is_kernel_text(unsigned long start, unsigned long end)
21428 {
21429- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21430- return 1;
21431- return 0;
21432+ if ((start > ktla_ktva((unsigned long)_etext) ||
21433+ end <= ktla_ktva((unsigned long)_stext)) &&
21434+ (start > ktla_ktva((unsigned long)_einittext) ||
21435+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21436+
21437+#ifdef CONFIG_ACPI_SLEEP
21438+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21439+#endif
21440+
21441+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21442+ return 0;
21443+ return 1;
21444 }
21445
21446 /*
21447@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21448 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21449 unsigned long start_pfn, end_pfn;
21450 pgd_t *pgd_base = swapper_pg_dir;
21451- int pgd_idx, pmd_idx, pte_ofs;
21452+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21453 unsigned long pfn;
21454 pgd_t *pgd;
21455+ pud_t *pud;
21456 pmd_t *pmd;
21457 pte_t *pte;
21458 unsigned pages_2m, pages_4k;
21459@@ -278,8 +279,13 @@ repeat:
21460 pfn = start_pfn;
21461 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21462 pgd = pgd_base + pgd_idx;
21463- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21464- pmd = one_md_table_init(pgd);
21465+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21466+ pud = pud_offset(pgd, 0);
21467+ pmd = pmd_offset(pud, 0);
21468+
21469+#ifdef CONFIG_X86_PAE
21470+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21471+#endif
21472
21473 if (pfn >= end_pfn)
21474 continue;
21475@@ -291,14 +297,13 @@ repeat:
21476 #endif
21477 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21478 pmd++, pmd_idx++) {
21479- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21480+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21481
21482 /*
21483 * Map with big pages if possible, otherwise
21484 * create normal page tables:
21485 */
21486 if (use_pse) {
21487- unsigned int addr2;
21488 pgprot_t prot = PAGE_KERNEL_LARGE;
21489 /*
21490 * first pass will use the same initial
21491@@ -308,11 +313,7 @@ repeat:
21492 __pgprot(PTE_IDENT_ATTR |
21493 _PAGE_PSE);
21494
21495- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21496- PAGE_OFFSET + PAGE_SIZE-1;
21497-
21498- if (is_kernel_text(addr) ||
21499- is_kernel_text(addr2))
21500+ if (is_kernel_text(address, address + PMD_SIZE))
21501 prot = PAGE_KERNEL_LARGE_EXEC;
21502
21503 pages_2m++;
21504@@ -329,7 +330,7 @@ repeat:
21505 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21506 pte += pte_ofs;
21507 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21508- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21509+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21510 pgprot_t prot = PAGE_KERNEL;
21511 /*
21512 * first pass will use the same initial
21513@@ -337,7 +338,7 @@ repeat:
21514 */
21515 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21516
21517- if (is_kernel_text(addr))
21518+ if (is_kernel_text(address, address + PAGE_SIZE))
21519 prot = PAGE_KERNEL_EXEC;
21520
21521 pages_4k++;
21522@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21523
21524 pud = pud_offset(pgd, va);
21525 pmd = pmd_offset(pud, va);
21526- if (!pmd_present(*pmd))
21527+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21528 break;
21529
21530 pte = pte_offset_kernel(pmd, va);
21531@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21532
21533 static void __init pagetable_init(void)
21534 {
21535- pgd_t *pgd_base = swapper_pg_dir;
21536-
21537- permanent_kmaps_init(pgd_base);
21538+ permanent_kmaps_init(swapper_pg_dir);
21539 }
21540
21541 #ifdef CONFIG_ACPI_SLEEP
21542@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21543 * ACPI suspend needs this for resume, because things like the intel-agp
21544 * driver might have split up a kernel 4MB mapping.
21545 */
21546-char swsusp_pg_dir[PAGE_SIZE]
21547+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21548 __attribute__ ((aligned(PAGE_SIZE)));
21549
21550 static inline void save_pg_dir(void)
21551 {
21552- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21553+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21554 }
21555 #else /* !CONFIG_ACPI_SLEEP */
21556 static inline void save_pg_dir(void)
21557@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21558 flush_tlb_all();
21559 }
21560
21561-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21562+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21563 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21564
21565 /* user-defined highmem size */
21566@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21567 * Initialize the boot-time allocator (with low memory only):
21568 */
21569 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21570- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21571+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21572 PAGE_SIZE);
21573 if (bootmap == -1L)
21574 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21575@@ -864,6 +863,12 @@ void __init mem_init(void)
21576
21577 pci_iommu_alloc();
21578
21579+#ifdef CONFIG_PAX_PER_CPU_PGD
21580+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21581+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21582+ KERNEL_PGD_PTRS);
21583+#endif
21584+
21585 #ifdef CONFIG_FLATMEM
21586 BUG_ON(!mem_map);
21587 #endif
21588@@ -881,7 +886,7 @@ void __init mem_init(void)
21589 set_highmem_pages_init();
21590
21591 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21592- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21593+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21594 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21595
21596 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21597@@ -923,10 +928,10 @@ void __init mem_init(void)
21598 ((unsigned long)&__init_end -
21599 (unsigned long)&__init_begin) >> 10,
21600
21601- (unsigned long)&_etext, (unsigned long)&_edata,
21602- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21603+ (unsigned long)&_sdata, (unsigned long)&_edata,
21604+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21605
21606- (unsigned long)&_text, (unsigned long)&_etext,
21607+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21608 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21609
21610 /*
21611@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21612 if (!kernel_set_to_readonly)
21613 return;
21614
21615+ start = ktla_ktva(start);
21616 pr_debug("Set kernel text: %lx - %lx for read write\n",
21617 start, start+size);
21618
21619@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21620 if (!kernel_set_to_readonly)
21621 return;
21622
21623+ start = ktla_ktva(start);
21624 pr_debug("Set kernel text: %lx - %lx for read only\n",
21625 start, start+size);
21626
21627@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21628 unsigned long start = PFN_ALIGN(_text);
21629 unsigned long size = PFN_ALIGN(_etext) - start;
21630
21631+ start = ktla_ktva(start);
21632 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21633 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21634 size >> 10);
21635diff -urNp linux-2.6.32.42/arch/x86/mm/init_64.c linux-2.6.32.42/arch/x86/mm/init_64.c
21636--- linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21637+++ linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21638@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21639 pmd = fill_pmd(pud, vaddr);
21640 pte = fill_pte(pmd, vaddr);
21641
21642+ pax_open_kernel();
21643 set_pte(pte, new_pte);
21644+ pax_close_kernel();
21645
21646 /*
21647 * It's enough to flush this one mapping.
21648@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21649 pgd = pgd_offset_k((unsigned long)__va(phys));
21650 if (pgd_none(*pgd)) {
21651 pud = (pud_t *) spp_getpage();
21652- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21653- _PAGE_USER));
21654+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21655 }
21656 pud = pud_offset(pgd, (unsigned long)__va(phys));
21657 if (pud_none(*pud)) {
21658 pmd = (pmd_t *) spp_getpage();
21659- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21660- _PAGE_USER));
21661+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21662 }
21663 pmd = pmd_offset(pud, phys);
21664 BUG_ON(!pmd_none(*pmd));
21665@@ -675,6 +675,12 @@ void __init mem_init(void)
21666
21667 pci_iommu_alloc();
21668
21669+#ifdef CONFIG_PAX_PER_CPU_PGD
21670+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21671+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21672+ KERNEL_PGD_PTRS);
21673+#endif
21674+
21675 /* clear_bss() already clear the empty_zero_page */
21676
21677 reservedpages = 0;
21678@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21679 static struct vm_area_struct gate_vma = {
21680 .vm_start = VSYSCALL_START,
21681 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21682- .vm_page_prot = PAGE_READONLY_EXEC,
21683- .vm_flags = VM_READ | VM_EXEC
21684+ .vm_page_prot = PAGE_READONLY,
21685+ .vm_flags = VM_READ
21686 };
21687
21688 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21689@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21690
21691 const char *arch_vma_name(struct vm_area_struct *vma)
21692 {
21693- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21694+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21695 return "[vdso]";
21696 if (vma == &gate_vma)
21697 return "[vsyscall]";
21698diff -urNp linux-2.6.32.42/arch/x86/mm/init.c linux-2.6.32.42/arch/x86/mm/init.c
21699--- linux-2.6.32.42/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21700+++ linux-2.6.32.42/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21701@@ -69,11 +69,7 @@ static void __init find_early_table_spac
21702 * cause a hotspot and fill up ZONE_DMA. The page tables
21703 * need roughly 0.5KB per GB.
21704 */
21705-#ifdef CONFIG_X86_32
21706- start = 0x7000;
21707-#else
21708- start = 0x8000;
21709-#endif
21710+ start = 0x100000;
21711 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21712 tables, PAGE_SIZE);
21713 if (e820_table_start == -1UL)
21714@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21715 #endif
21716
21717 set_nx();
21718- if (nx_enabled)
21719+ if (nx_enabled && cpu_has_nx)
21720 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21721
21722 /* Enable PSE if available */
21723@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21724 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21725 * mmio resources as well as potential bios/acpi data regions.
21726 */
21727+
21728 int devmem_is_allowed(unsigned long pagenr)
21729 {
21730+#ifdef CONFIG_GRKERNSEC_KMEM
21731+ /* allow BDA */
21732+ if (!pagenr)
21733+ return 1;
21734+ /* allow EBDA */
21735+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21736+ return 1;
21737+ /* allow ISA/video mem */
21738+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21739+ return 1;
21740+ /* throw out everything else below 1MB */
21741+ if (pagenr <= 256)
21742+ return 0;
21743+#else
21744 if (pagenr <= 256)
21745 return 1;
21746+#endif
21747+
21748 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21749 return 0;
21750 if (!page_is_ram(pagenr))
21751@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21752
21753 void free_initmem(void)
21754 {
21755+
21756+#ifdef CONFIG_PAX_KERNEXEC
21757+#ifdef CONFIG_X86_32
21758+ /* PaX: limit KERNEL_CS to actual size */
21759+ unsigned long addr, limit;
21760+ struct desc_struct d;
21761+ int cpu;
21762+
21763+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21764+ limit = (limit - 1UL) >> PAGE_SHIFT;
21765+
21766+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21767+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21768+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21769+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21770+ }
21771+
21772+ /* PaX: make KERNEL_CS read-only */
21773+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21774+ if (!paravirt_enabled())
21775+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21776+/*
21777+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21778+ pgd = pgd_offset_k(addr);
21779+ pud = pud_offset(pgd, addr);
21780+ pmd = pmd_offset(pud, addr);
21781+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21782+ }
21783+*/
21784+#ifdef CONFIG_X86_PAE
21785+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21786+/*
21787+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21788+ pgd = pgd_offset_k(addr);
21789+ pud = pud_offset(pgd, addr);
21790+ pmd = pmd_offset(pud, addr);
21791+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21792+ }
21793+*/
21794+#endif
21795+
21796+#ifdef CONFIG_MODULES
21797+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21798+#endif
21799+
21800+#else
21801+ pgd_t *pgd;
21802+ pud_t *pud;
21803+ pmd_t *pmd;
21804+ unsigned long addr, end;
21805+
21806+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21807+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21808+ pgd = pgd_offset_k(addr);
21809+ pud = pud_offset(pgd, addr);
21810+ pmd = pmd_offset(pud, addr);
21811+ if (!pmd_present(*pmd))
21812+ continue;
21813+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21814+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21815+ else
21816+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21817+ }
21818+
21819+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21820+ end = addr + KERNEL_IMAGE_SIZE;
21821+ for (; addr < end; addr += PMD_SIZE) {
21822+ pgd = pgd_offset_k(addr);
21823+ pud = pud_offset(pgd, addr);
21824+ pmd = pmd_offset(pud, addr);
21825+ if (!pmd_present(*pmd))
21826+ continue;
21827+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21828+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21829+ }
21830+#endif
21831+
21832+ flush_tlb_all();
21833+#endif
21834+
21835 free_init_pages("unused kernel memory",
21836 (unsigned long)(&__init_begin),
21837 (unsigned long)(&__init_end));
21838diff -urNp linux-2.6.32.42/arch/x86/mm/iomap_32.c linux-2.6.32.42/arch/x86/mm/iomap_32.c
21839--- linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21840+++ linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21841@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21842 debug_kmap_atomic(type);
21843 idx = type + KM_TYPE_NR * smp_processor_id();
21844 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21845+
21846+ pax_open_kernel();
21847 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21848+ pax_close_kernel();
21849+
21850 arch_flush_lazy_mmu_mode();
21851
21852 return (void *)vaddr;
21853diff -urNp linux-2.6.32.42/arch/x86/mm/ioremap.c linux-2.6.32.42/arch/x86/mm/ioremap.c
21854--- linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21855+++ linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21856@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21857 * Second special case: Some BIOSen report the PC BIOS
21858 * area (640->1Mb) as ram even though it is not.
21859 */
21860- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21861- pagenr < (BIOS_END >> PAGE_SHIFT))
21862+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21863+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21864 return 0;
21865
21866 for (i = 0; i < e820.nr_map; i++) {
21867@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21868 /*
21869 * Don't allow anybody to remap normal RAM that we're using..
21870 */
21871- for (pfn = phys_addr >> PAGE_SHIFT;
21872- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21873- pfn++) {
21874-
21875+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21876 int is_ram = page_is_ram(pfn);
21877
21878- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21879+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21880 return NULL;
21881 WARN_ON_ONCE(is_ram);
21882 }
21883@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21884 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21885
21886 static __initdata int after_paging_init;
21887-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21888+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21889
21890 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21891 {
21892@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21893 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21894
21895 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21896- memset(bm_pte, 0, sizeof(bm_pte));
21897- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21898+ pmd_populate_user(&init_mm, pmd, bm_pte);
21899
21900 /*
21901 * The boot-ioremap range spans multiple pmds, for which
21902diff -urNp linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c
21903--- linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21904+++ linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21905@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21906 * memory (e.g. tracked pages)? For now, we need this to avoid
21907 * invoking kmemcheck for PnP BIOS calls.
21908 */
21909- if (regs->flags & X86_VM_MASK)
21910+ if (v8086_mode(regs))
21911 return false;
21912- if (regs->cs != __KERNEL_CS)
21913+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21914 return false;
21915
21916 pte = kmemcheck_pte_lookup(address);
21917diff -urNp linux-2.6.32.42/arch/x86/mm/mmap.c linux-2.6.32.42/arch/x86/mm/mmap.c
21918--- linux-2.6.32.42/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21919+++ linux-2.6.32.42/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21920@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21921 * Leave an at least ~128 MB hole with possible stack randomization.
21922 */
21923 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21924-#define MAX_GAP (TASK_SIZE/6*5)
21925+#define MAX_GAP (pax_task_size/6*5)
21926
21927 /*
21928 * True on X86_32 or when emulating IA32 on X86_64
21929@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21930 return rnd << PAGE_SHIFT;
21931 }
21932
21933-static unsigned long mmap_base(void)
21934+static unsigned long mmap_base(struct mm_struct *mm)
21935 {
21936 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21937+ unsigned long pax_task_size = TASK_SIZE;
21938+
21939+#ifdef CONFIG_PAX_SEGMEXEC
21940+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21941+ pax_task_size = SEGMEXEC_TASK_SIZE;
21942+#endif
21943
21944 if (gap < MIN_GAP)
21945 gap = MIN_GAP;
21946 else if (gap > MAX_GAP)
21947 gap = MAX_GAP;
21948
21949- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21950+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21951 }
21952
21953 /*
21954 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21955 * does, but not when emulating X86_32
21956 */
21957-static unsigned long mmap_legacy_base(void)
21958+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21959 {
21960- if (mmap_is_ia32())
21961+ if (mmap_is_ia32()) {
21962+
21963+#ifdef CONFIG_PAX_SEGMEXEC
21964+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21965+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21966+ else
21967+#endif
21968+
21969 return TASK_UNMAPPED_BASE;
21970- else
21971+ } else
21972 return TASK_UNMAPPED_BASE + mmap_rnd();
21973 }
21974
21975@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21976 void arch_pick_mmap_layout(struct mm_struct *mm)
21977 {
21978 if (mmap_is_legacy()) {
21979- mm->mmap_base = mmap_legacy_base();
21980+ mm->mmap_base = mmap_legacy_base(mm);
21981+
21982+#ifdef CONFIG_PAX_RANDMMAP
21983+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21984+ mm->mmap_base += mm->delta_mmap;
21985+#endif
21986+
21987 mm->get_unmapped_area = arch_get_unmapped_area;
21988 mm->unmap_area = arch_unmap_area;
21989 } else {
21990- mm->mmap_base = mmap_base();
21991+ mm->mmap_base = mmap_base(mm);
21992+
21993+#ifdef CONFIG_PAX_RANDMMAP
21994+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21995+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21996+#endif
21997+
21998 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21999 mm->unmap_area = arch_unmap_area_topdown;
22000 }
22001diff -urNp linux-2.6.32.42/arch/x86/mm/mmio-mod.c linux-2.6.32.42/arch/x86/mm/mmio-mod.c
22002--- linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22003+++ linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22004@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22005 break;
22006 default:
22007 {
22008- unsigned char *ip = (unsigned char *)instptr;
22009+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22010 my_trace->opcode = MMIO_UNKNOWN_OP;
22011 my_trace->width = 0;
22012 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22013@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22014 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22015 void __iomem *addr)
22016 {
22017- static atomic_t next_id;
22018+ static atomic_unchecked_t next_id;
22019 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22020 /* These are page-unaligned. */
22021 struct mmiotrace_map map = {
22022@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22023 .private = trace
22024 },
22025 .phys = offset,
22026- .id = atomic_inc_return(&next_id)
22027+ .id = atomic_inc_return_unchecked(&next_id)
22028 };
22029 map.map_id = trace->id;
22030
22031diff -urNp linux-2.6.32.42/arch/x86/mm/numa_32.c linux-2.6.32.42/arch/x86/mm/numa_32.c
22032--- linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22033+++ linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22034@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22035 }
22036 #endif
22037
22038-extern unsigned long find_max_low_pfn(void);
22039 extern unsigned long highend_pfn, highstart_pfn;
22040
22041 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22042diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr.c linux-2.6.32.42/arch/x86/mm/pageattr.c
22043--- linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22044+++ linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22045@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22046 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22047 */
22048 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22049- pgprot_val(forbidden) |= _PAGE_NX;
22050+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22051
22052 /*
22053 * The kernel text needs to be executable for obvious reasons
22054 * Does not cover __inittext since that is gone later on. On
22055 * 64bit we do not enforce !NX on the low mapping
22056 */
22057- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22058- pgprot_val(forbidden) |= _PAGE_NX;
22059+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22060+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22061
22062+#ifdef CONFIG_DEBUG_RODATA
22063 /*
22064 * The .rodata section needs to be read-only. Using the pfn
22065 * catches all aliases.
22066@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22067 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22068 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22069 pgprot_val(forbidden) |= _PAGE_RW;
22070+#endif
22071+
22072+#ifdef CONFIG_PAX_KERNEXEC
22073+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22074+ pgprot_val(forbidden) |= _PAGE_RW;
22075+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22076+ }
22077+#endif
22078
22079 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22080
22081@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22082 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22083 {
22084 /* change init_mm */
22085+ pax_open_kernel();
22086 set_pte_atomic(kpte, pte);
22087+
22088 #ifdef CONFIG_X86_32
22089 if (!SHARED_KERNEL_PMD) {
22090+
22091+#ifdef CONFIG_PAX_PER_CPU_PGD
22092+ unsigned long cpu;
22093+#else
22094 struct page *page;
22095+#endif
22096
22097+#ifdef CONFIG_PAX_PER_CPU_PGD
22098+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22099+ pgd_t *pgd = get_cpu_pgd(cpu);
22100+#else
22101 list_for_each_entry(page, &pgd_list, lru) {
22102- pgd_t *pgd;
22103+ pgd_t *pgd = (pgd_t *)page_address(page);
22104+#endif
22105+
22106 pud_t *pud;
22107 pmd_t *pmd;
22108
22109- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22110+ pgd += pgd_index(address);
22111 pud = pud_offset(pgd, address);
22112 pmd = pmd_offset(pud, address);
22113 set_pte_atomic((pte_t *)pmd, pte);
22114 }
22115 }
22116 #endif
22117+ pax_close_kernel();
22118 }
22119
22120 static int
22121diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr-test.c linux-2.6.32.42/arch/x86/mm/pageattr-test.c
22122--- linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22123+++ linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22124@@ -36,7 +36,7 @@ enum {
22125
22126 static int pte_testbit(pte_t pte)
22127 {
22128- return pte_flags(pte) & _PAGE_UNUSED1;
22129+ return pte_flags(pte) & _PAGE_CPA_TEST;
22130 }
22131
22132 struct split_state {
22133diff -urNp linux-2.6.32.42/arch/x86/mm/pat.c linux-2.6.32.42/arch/x86/mm/pat.c
22134--- linux-2.6.32.42/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22135+++ linux-2.6.32.42/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22136@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22137
22138 conflict:
22139 printk(KERN_INFO "%s:%d conflicting memory types "
22140- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22141+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22142 new->end, cattr_name(new->type), cattr_name(entry->type));
22143 return -EBUSY;
22144 }
22145@@ -559,7 +559,7 @@ unlock_ret:
22146
22147 if (err) {
22148 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22149- current->comm, current->pid, start, end);
22150+ current->comm, task_pid_nr(current), start, end);
22151 }
22152
22153 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22154@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22155 while (cursor < to) {
22156 if (!devmem_is_allowed(pfn)) {
22157 printk(KERN_INFO
22158- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22159- current->comm, from, to);
22160+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22161+ current->comm, from, to, cursor);
22162 return 0;
22163 }
22164 cursor += PAGE_SIZE;
22165@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22166 printk(KERN_INFO
22167 "%s:%d ioremap_change_attr failed %s "
22168 "for %Lx-%Lx\n",
22169- current->comm, current->pid,
22170+ current->comm, task_pid_nr(current),
22171 cattr_name(flags),
22172 base, (unsigned long long)(base + size));
22173 return -EINVAL;
22174@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22175 free_memtype(paddr, paddr + size);
22176 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22177 " for %Lx-%Lx, got %s\n",
22178- current->comm, current->pid,
22179+ current->comm, task_pid_nr(current),
22180 cattr_name(want_flags),
22181 (unsigned long long)paddr,
22182 (unsigned long long)(paddr + size),
22183diff -urNp linux-2.6.32.42/arch/x86/mm/pf_in.c linux-2.6.32.42/arch/x86/mm/pf_in.c
22184--- linux-2.6.32.42/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22185+++ linux-2.6.32.42/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22186@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22187 int i;
22188 enum reason_type rv = OTHERS;
22189
22190- p = (unsigned char *)ins_addr;
22191+ p = (unsigned char *)ktla_ktva(ins_addr);
22192 p += skip_prefix(p, &prf);
22193 p += get_opcode(p, &opcode);
22194
22195@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22196 struct prefix_bits prf;
22197 int i;
22198
22199- p = (unsigned char *)ins_addr;
22200+ p = (unsigned char *)ktla_ktva(ins_addr);
22201 p += skip_prefix(p, &prf);
22202 p += get_opcode(p, &opcode);
22203
22204@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22205 struct prefix_bits prf;
22206 int i;
22207
22208- p = (unsigned char *)ins_addr;
22209+ p = (unsigned char *)ktla_ktva(ins_addr);
22210 p += skip_prefix(p, &prf);
22211 p += get_opcode(p, &opcode);
22212
22213@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22214 int i;
22215 unsigned long rv;
22216
22217- p = (unsigned char *)ins_addr;
22218+ p = (unsigned char *)ktla_ktva(ins_addr);
22219 p += skip_prefix(p, &prf);
22220 p += get_opcode(p, &opcode);
22221 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22222@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22223 int i;
22224 unsigned long rv;
22225
22226- p = (unsigned char *)ins_addr;
22227+ p = (unsigned char *)ktla_ktva(ins_addr);
22228 p += skip_prefix(p, &prf);
22229 p += get_opcode(p, &opcode);
22230 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22231diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable_32.c linux-2.6.32.42/arch/x86/mm/pgtable_32.c
22232--- linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22233+++ linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22234@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22235 return;
22236 }
22237 pte = pte_offset_kernel(pmd, vaddr);
22238+
22239+ pax_open_kernel();
22240 if (pte_val(pteval))
22241 set_pte_at(&init_mm, vaddr, pte, pteval);
22242 else
22243 pte_clear(&init_mm, vaddr, pte);
22244+ pax_close_kernel();
22245
22246 /*
22247 * It's enough to flush this one mapping.
22248diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable.c linux-2.6.32.42/arch/x86/mm/pgtable.c
22249--- linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22250+++ linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22251@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22252 list_del(&page->lru);
22253 }
22254
22255-#define UNSHARED_PTRS_PER_PGD \
22256- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22257+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22258+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22259
22260+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22261+{
22262+ while (count--)
22263+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22264+}
22265+#endif
22266+
22267+#ifdef CONFIG_PAX_PER_CPU_PGD
22268+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22269+{
22270+ while (count--)
22271+
22272+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22273+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22274+#else
22275+ *dst++ = *src++;
22276+#endif
22277+
22278+}
22279+#endif
22280+
22281+#ifdef CONFIG_X86_64
22282+#define pxd_t pud_t
22283+#define pyd_t pgd_t
22284+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22285+#define pxd_free(mm, pud) pud_free((mm), (pud))
22286+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22287+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22288+#define PYD_SIZE PGDIR_SIZE
22289+#else
22290+#define pxd_t pmd_t
22291+#define pyd_t pud_t
22292+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22293+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22294+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22295+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22296+#define PYD_SIZE PUD_SIZE
22297+#endif
22298+
22299+#ifdef CONFIG_PAX_PER_CPU_PGD
22300+static inline void pgd_ctor(pgd_t *pgd) {}
22301+static inline void pgd_dtor(pgd_t *pgd) {}
22302+#else
22303 static void pgd_ctor(pgd_t *pgd)
22304 {
22305 /* If the pgd points to a shared pagetable level (either the
22306@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22307 pgd_list_del(pgd);
22308 spin_unlock_irqrestore(&pgd_lock, flags);
22309 }
22310+#endif
22311
22312 /*
22313 * List of all pgd's needed for non-PAE so it can invalidate entries
22314@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22315 * -- wli
22316 */
22317
22318-#ifdef CONFIG_X86_PAE
22319+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22320 /*
22321 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22322 * updating the top-level pagetable entries to guarantee the
22323@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22324 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22325 * and initialize the kernel pmds here.
22326 */
22327-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22328+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22329
22330 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22331 {
22332@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22333 */
22334 flush_tlb_mm(mm);
22335 }
22336+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22337+#define PREALLOCATED_PXDS USER_PGD_PTRS
22338 #else /* !CONFIG_X86_PAE */
22339
22340 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22341-#define PREALLOCATED_PMDS 0
22342+#define PREALLOCATED_PXDS 0
22343
22344 #endif /* CONFIG_X86_PAE */
22345
22346-static void free_pmds(pmd_t *pmds[])
22347+static void free_pxds(pxd_t *pxds[])
22348 {
22349 int i;
22350
22351- for(i = 0; i < PREALLOCATED_PMDS; i++)
22352- if (pmds[i])
22353- free_page((unsigned long)pmds[i]);
22354+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22355+ if (pxds[i])
22356+ free_page((unsigned long)pxds[i]);
22357 }
22358
22359-static int preallocate_pmds(pmd_t *pmds[])
22360+static int preallocate_pxds(pxd_t *pxds[])
22361 {
22362 int i;
22363 bool failed = false;
22364
22365- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22366- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22367- if (pmd == NULL)
22368+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22369+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22370+ if (pxd == NULL)
22371 failed = true;
22372- pmds[i] = pmd;
22373+ pxds[i] = pxd;
22374 }
22375
22376 if (failed) {
22377- free_pmds(pmds);
22378+ free_pxds(pxds);
22379 return -ENOMEM;
22380 }
22381
22382@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22383 * preallocate which never got a corresponding vma will need to be
22384 * freed manually.
22385 */
22386-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22387+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22388 {
22389 int i;
22390
22391- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22392+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22393 pgd_t pgd = pgdp[i];
22394
22395 if (pgd_val(pgd) != 0) {
22396- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22397+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22398
22399- pgdp[i] = native_make_pgd(0);
22400+ set_pgd(pgdp + i, native_make_pgd(0));
22401
22402- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22403- pmd_free(mm, pmd);
22404+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22405+ pxd_free(mm, pxd);
22406 }
22407 }
22408 }
22409
22410-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22411+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22412 {
22413- pud_t *pud;
22414+ pyd_t *pyd;
22415 unsigned long addr;
22416 int i;
22417
22418- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22419+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22420 return;
22421
22422- pud = pud_offset(pgd, 0);
22423+#ifdef CONFIG_X86_64
22424+ pyd = pyd_offset(mm, 0L);
22425+#else
22426+ pyd = pyd_offset(pgd, 0L);
22427+#endif
22428
22429- for (addr = i = 0; i < PREALLOCATED_PMDS;
22430- i++, pud++, addr += PUD_SIZE) {
22431- pmd_t *pmd = pmds[i];
22432+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22433+ i++, pyd++, addr += PYD_SIZE) {
22434+ pxd_t *pxd = pxds[i];
22435
22436 if (i >= KERNEL_PGD_BOUNDARY)
22437- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22438- sizeof(pmd_t) * PTRS_PER_PMD);
22439+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22440+ sizeof(pxd_t) * PTRS_PER_PMD);
22441
22442- pud_populate(mm, pud, pmd);
22443+ pyd_populate(mm, pyd, pxd);
22444 }
22445 }
22446
22447 pgd_t *pgd_alloc(struct mm_struct *mm)
22448 {
22449 pgd_t *pgd;
22450- pmd_t *pmds[PREALLOCATED_PMDS];
22451+ pxd_t *pxds[PREALLOCATED_PXDS];
22452+
22453 unsigned long flags;
22454
22455 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22456@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22457
22458 mm->pgd = pgd;
22459
22460- if (preallocate_pmds(pmds) != 0)
22461+ if (preallocate_pxds(pxds) != 0)
22462 goto out_free_pgd;
22463
22464 if (paravirt_pgd_alloc(mm) != 0)
22465- goto out_free_pmds;
22466+ goto out_free_pxds;
22467
22468 /*
22469 * Make sure that pre-populating the pmds is atomic with
22470@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22471 spin_lock_irqsave(&pgd_lock, flags);
22472
22473 pgd_ctor(pgd);
22474- pgd_prepopulate_pmd(mm, pgd, pmds);
22475+ pgd_prepopulate_pxd(mm, pgd, pxds);
22476
22477 spin_unlock_irqrestore(&pgd_lock, flags);
22478
22479 return pgd;
22480
22481-out_free_pmds:
22482- free_pmds(pmds);
22483+out_free_pxds:
22484+ free_pxds(pxds);
22485 out_free_pgd:
22486 free_page((unsigned long)pgd);
22487 out:
22488@@ -287,7 +338,7 @@ out:
22489
22490 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22491 {
22492- pgd_mop_up_pmds(mm, pgd);
22493+ pgd_mop_up_pxds(mm, pgd);
22494 pgd_dtor(pgd);
22495 paravirt_pgd_free(mm, pgd);
22496 free_page((unsigned long)pgd);
22497diff -urNp linux-2.6.32.42/arch/x86/mm/setup_nx.c linux-2.6.32.42/arch/x86/mm/setup_nx.c
22498--- linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22499+++ linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22500@@ -4,11 +4,10 @@
22501
22502 #include <asm/pgtable.h>
22503
22504+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22505 int nx_enabled;
22506
22507-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22508-static int disable_nx __cpuinitdata;
22509-
22510+#ifndef CONFIG_PAX_PAGEEXEC
22511 /*
22512 * noexec = on|off
22513 *
22514@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22515 if (!str)
22516 return -EINVAL;
22517 if (!strncmp(str, "on", 2)) {
22518- __supported_pte_mask |= _PAGE_NX;
22519- disable_nx = 0;
22520+ nx_enabled = 1;
22521 } else if (!strncmp(str, "off", 3)) {
22522- disable_nx = 1;
22523- __supported_pte_mask &= ~_PAGE_NX;
22524+ nx_enabled = 0;
22525 }
22526 return 0;
22527 }
22528 early_param("noexec", noexec_setup);
22529 #endif
22530+#endif
22531
22532 #ifdef CONFIG_X86_PAE
22533 void __init set_nx(void)
22534 {
22535- unsigned int v[4], l, h;
22536+ if (!nx_enabled && cpu_has_nx) {
22537+ unsigned l, h;
22538
22539- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22540- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22541-
22542- if ((v[3] & (1 << 20)) && !disable_nx) {
22543- rdmsr(MSR_EFER, l, h);
22544- l |= EFER_NX;
22545- wrmsr(MSR_EFER, l, h);
22546- nx_enabled = 1;
22547- __supported_pte_mask |= _PAGE_NX;
22548- }
22549+ __supported_pte_mask &= ~_PAGE_NX;
22550+ rdmsr(MSR_EFER, l, h);
22551+ l &= ~EFER_NX;
22552+ wrmsr(MSR_EFER, l, h);
22553 }
22554 }
22555 #else
22556@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22557 unsigned long efer;
22558
22559 rdmsrl(MSR_EFER, efer);
22560- if (!(efer & EFER_NX) || disable_nx)
22561+ if (!(efer & EFER_NX) || !nx_enabled)
22562 __supported_pte_mask &= ~_PAGE_NX;
22563 }
22564 #endif
22565diff -urNp linux-2.6.32.42/arch/x86/mm/tlb.c linux-2.6.32.42/arch/x86/mm/tlb.c
22566--- linux-2.6.32.42/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22567+++ linux-2.6.32.42/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22568@@ -61,7 +61,11 @@ void leave_mm(int cpu)
22569 BUG();
22570 cpumask_clear_cpu(cpu,
22571 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22572+
22573+#ifndef CONFIG_PAX_PER_CPU_PGD
22574 load_cr3(swapper_pg_dir);
22575+#endif
22576+
22577 }
22578 EXPORT_SYMBOL_GPL(leave_mm);
22579
22580diff -urNp linux-2.6.32.42/arch/x86/oprofile/backtrace.c linux-2.6.32.42/arch/x86/oprofile/backtrace.c
22581--- linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22582+++ linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22583@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22584 struct frame_head bufhead[2];
22585
22586 /* Also check accessibility of one struct frame_head beyond */
22587- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22588+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22589 return NULL;
22590 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22591 return NULL;
22592@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22593 {
22594 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22595
22596- if (!user_mode_vm(regs)) {
22597+ if (!user_mode(regs)) {
22598 unsigned long stack = kernel_stack_pointer(regs);
22599 if (depth)
22600 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22601diff -urNp linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c
22602--- linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22603+++ linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22604@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22605 #endif
22606 }
22607
22608-static int inline addr_increment(void)
22609+static inline int addr_increment(void)
22610 {
22611 #ifdef CONFIG_SMP
22612 return smp_num_siblings == 2 ? 2 : 1;
22613diff -urNp linux-2.6.32.42/arch/x86/pci/common.c linux-2.6.32.42/arch/x86/pci/common.c
22614--- linux-2.6.32.42/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22615+++ linux-2.6.32.42/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22616@@ -31,8 +31,8 @@ int noioapicreroute = 1;
22617 int pcibios_last_bus = -1;
22618 unsigned long pirq_table_addr;
22619 struct pci_bus *pci_root_bus;
22620-struct pci_raw_ops *raw_pci_ops;
22621-struct pci_raw_ops *raw_pci_ext_ops;
22622+const struct pci_raw_ops *raw_pci_ops;
22623+const struct pci_raw_ops *raw_pci_ext_ops;
22624
22625 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22626 int reg, int len, u32 *val)
22627diff -urNp linux-2.6.32.42/arch/x86/pci/direct.c linux-2.6.32.42/arch/x86/pci/direct.c
22628--- linux-2.6.32.42/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22629+++ linux-2.6.32.42/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22630@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22631
22632 #undef PCI_CONF1_ADDRESS
22633
22634-struct pci_raw_ops pci_direct_conf1 = {
22635+const struct pci_raw_ops pci_direct_conf1 = {
22636 .read = pci_conf1_read,
22637 .write = pci_conf1_write,
22638 };
22639@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22640
22641 #undef PCI_CONF2_ADDRESS
22642
22643-struct pci_raw_ops pci_direct_conf2 = {
22644+const struct pci_raw_ops pci_direct_conf2 = {
22645 .read = pci_conf2_read,
22646 .write = pci_conf2_write,
22647 };
22648@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22649 * This should be close to trivial, but it isn't, because there are buggy
22650 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22651 */
22652-static int __init pci_sanity_check(struct pci_raw_ops *o)
22653+static int __init pci_sanity_check(const struct pci_raw_ops *o)
22654 {
22655 u32 x = 0;
22656 int year, devfn;
22657diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_32.c linux-2.6.32.42/arch/x86/pci/mmconfig_32.c
22658--- linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22659+++ linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22660@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22661 return 0;
22662 }
22663
22664-static struct pci_raw_ops pci_mmcfg = {
22665+static const struct pci_raw_ops pci_mmcfg = {
22666 .read = pci_mmcfg_read,
22667 .write = pci_mmcfg_write,
22668 };
22669diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_64.c linux-2.6.32.42/arch/x86/pci/mmconfig_64.c
22670--- linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22671+++ linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22672@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22673 return 0;
22674 }
22675
22676-static struct pci_raw_ops pci_mmcfg = {
22677+static const struct pci_raw_ops pci_mmcfg = {
22678 .read = pci_mmcfg_read,
22679 .write = pci_mmcfg_write,
22680 };
22681diff -urNp linux-2.6.32.42/arch/x86/pci/numaq_32.c linux-2.6.32.42/arch/x86/pci/numaq_32.c
22682--- linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22683+++ linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22684@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22685
22686 #undef PCI_CONF1_MQ_ADDRESS
22687
22688-static struct pci_raw_ops pci_direct_conf1_mq = {
22689+static const struct pci_raw_ops pci_direct_conf1_mq = {
22690 .read = pci_conf1_mq_read,
22691 .write = pci_conf1_mq_write
22692 };
22693diff -urNp linux-2.6.32.42/arch/x86/pci/olpc.c linux-2.6.32.42/arch/x86/pci/olpc.c
22694--- linux-2.6.32.42/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22695+++ linux-2.6.32.42/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22696@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22697 return 0;
22698 }
22699
22700-static struct pci_raw_ops pci_olpc_conf = {
22701+static const struct pci_raw_ops pci_olpc_conf = {
22702 .read = pci_olpc_read,
22703 .write = pci_olpc_write,
22704 };
22705diff -urNp linux-2.6.32.42/arch/x86/pci/pcbios.c linux-2.6.32.42/arch/x86/pci/pcbios.c
22706--- linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22707+++ linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22708@@ -56,50 +56,93 @@ union bios32 {
22709 static struct {
22710 unsigned long address;
22711 unsigned short segment;
22712-} bios32_indirect = { 0, __KERNEL_CS };
22713+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22714
22715 /*
22716 * Returns the entry point for the given service, NULL on error
22717 */
22718
22719-static unsigned long bios32_service(unsigned long service)
22720+static unsigned long __devinit bios32_service(unsigned long service)
22721 {
22722 unsigned char return_code; /* %al */
22723 unsigned long address; /* %ebx */
22724 unsigned long length; /* %ecx */
22725 unsigned long entry; /* %edx */
22726 unsigned long flags;
22727+ struct desc_struct d, *gdt;
22728
22729 local_irq_save(flags);
22730- __asm__("lcall *(%%edi); cld"
22731+
22732+ gdt = get_cpu_gdt_table(smp_processor_id());
22733+
22734+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22735+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22736+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22737+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22738+
22739+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22740 : "=a" (return_code),
22741 "=b" (address),
22742 "=c" (length),
22743 "=d" (entry)
22744 : "0" (service),
22745 "1" (0),
22746- "D" (&bios32_indirect));
22747+ "D" (&bios32_indirect),
22748+ "r"(__PCIBIOS_DS)
22749+ : "memory");
22750+
22751+ pax_open_kernel();
22752+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22753+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22754+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22755+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22756+ pax_close_kernel();
22757+
22758 local_irq_restore(flags);
22759
22760 switch (return_code) {
22761- case 0:
22762- return address + entry;
22763- case 0x80: /* Not present */
22764- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22765- return 0;
22766- default: /* Shouldn't happen */
22767- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22768- service, return_code);
22769+ case 0: {
22770+ int cpu;
22771+ unsigned char flags;
22772+
22773+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22774+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22775+ printk(KERN_WARNING "bios32_service: not valid\n");
22776 return 0;
22777+ }
22778+ address = address + PAGE_OFFSET;
22779+ length += 16UL; /* some BIOSs underreport this... */
22780+ flags = 4;
22781+ if (length >= 64*1024*1024) {
22782+ length >>= PAGE_SHIFT;
22783+ flags |= 8;
22784+ }
22785+
22786+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22787+ gdt = get_cpu_gdt_table(cpu);
22788+ pack_descriptor(&d, address, length, 0x9b, flags);
22789+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22790+ pack_descriptor(&d, address, length, 0x93, flags);
22791+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22792+ }
22793+ return entry;
22794+ }
22795+ case 0x80: /* Not present */
22796+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22797+ return 0;
22798+ default: /* Shouldn't happen */
22799+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22800+ service, return_code);
22801+ return 0;
22802 }
22803 }
22804
22805 static struct {
22806 unsigned long address;
22807 unsigned short segment;
22808-} pci_indirect = { 0, __KERNEL_CS };
22809+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22810
22811-static int pci_bios_present;
22812+static int pci_bios_present __read_only;
22813
22814 static int __devinit check_pcibios(void)
22815 {
22816@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22817 unsigned long flags, pcibios_entry;
22818
22819 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22820- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22821+ pci_indirect.address = pcibios_entry;
22822
22823 local_irq_save(flags);
22824- __asm__(
22825- "lcall *(%%edi); cld\n\t"
22826+ __asm__("movw %w6, %%ds\n\t"
22827+ "lcall *%%ss:(%%edi); cld\n\t"
22828+ "push %%ss\n\t"
22829+ "pop %%ds\n\t"
22830 "jc 1f\n\t"
22831 "xor %%ah, %%ah\n"
22832 "1:"
22833@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22834 "=b" (ebx),
22835 "=c" (ecx)
22836 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22837- "D" (&pci_indirect)
22838+ "D" (&pci_indirect),
22839+ "r" (__PCIBIOS_DS)
22840 : "memory");
22841 local_irq_restore(flags);
22842
22843@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22844
22845 switch (len) {
22846 case 1:
22847- __asm__("lcall *(%%esi); cld\n\t"
22848+ __asm__("movw %w6, %%ds\n\t"
22849+ "lcall *%%ss:(%%esi); cld\n\t"
22850+ "push %%ss\n\t"
22851+ "pop %%ds\n\t"
22852 "jc 1f\n\t"
22853 "xor %%ah, %%ah\n"
22854 "1:"
22855@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22856 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22857 "b" (bx),
22858 "D" ((long)reg),
22859- "S" (&pci_indirect));
22860+ "S" (&pci_indirect),
22861+ "r" (__PCIBIOS_DS));
22862 /*
22863 * Zero-extend the result beyond 8 bits, do not trust the
22864 * BIOS having done it:
22865@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22866 *value &= 0xff;
22867 break;
22868 case 2:
22869- __asm__("lcall *(%%esi); cld\n\t"
22870+ __asm__("movw %w6, %%ds\n\t"
22871+ "lcall *%%ss:(%%esi); cld\n\t"
22872+ "push %%ss\n\t"
22873+ "pop %%ds\n\t"
22874 "jc 1f\n\t"
22875 "xor %%ah, %%ah\n"
22876 "1:"
22877@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22878 : "1" (PCIBIOS_READ_CONFIG_WORD),
22879 "b" (bx),
22880 "D" ((long)reg),
22881- "S" (&pci_indirect));
22882+ "S" (&pci_indirect),
22883+ "r" (__PCIBIOS_DS));
22884 /*
22885 * Zero-extend the result beyond 16 bits, do not trust the
22886 * BIOS having done it:
22887@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22888 *value &= 0xffff;
22889 break;
22890 case 4:
22891- __asm__("lcall *(%%esi); cld\n\t"
22892+ __asm__("movw %w6, %%ds\n\t"
22893+ "lcall *%%ss:(%%esi); cld\n\t"
22894+ "push %%ss\n\t"
22895+ "pop %%ds\n\t"
22896 "jc 1f\n\t"
22897 "xor %%ah, %%ah\n"
22898 "1:"
22899@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22900 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22901 "b" (bx),
22902 "D" ((long)reg),
22903- "S" (&pci_indirect));
22904+ "S" (&pci_indirect),
22905+ "r" (__PCIBIOS_DS));
22906 break;
22907 }
22908
22909@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22910
22911 switch (len) {
22912 case 1:
22913- __asm__("lcall *(%%esi); cld\n\t"
22914+ __asm__("movw %w6, %%ds\n\t"
22915+ "lcall *%%ss:(%%esi); cld\n\t"
22916+ "push %%ss\n\t"
22917+ "pop %%ds\n\t"
22918 "jc 1f\n\t"
22919 "xor %%ah, %%ah\n"
22920 "1:"
22921@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22922 "c" (value),
22923 "b" (bx),
22924 "D" ((long)reg),
22925- "S" (&pci_indirect));
22926+ "S" (&pci_indirect),
22927+ "r" (__PCIBIOS_DS));
22928 break;
22929 case 2:
22930- __asm__("lcall *(%%esi); cld\n\t"
22931+ __asm__("movw %w6, %%ds\n\t"
22932+ "lcall *%%ss:(%%esi); cld\n\t"
22933+ "push %%ss\n\t"
22934+ "pop %%ds\n\t"
22935 "jc 1f\n\t"
22936 "xor %%ah, %%ah\n"
22937 "1:"
22938@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22939 "c" (value),
22940 "b" (bx),
22941 "D" ((long)reg),
22942- "S" (&pci_indirect));
22943+ "S" (&pci_indirect),
22944+ "r" (__PCIBIOS_DS));
22945 break;
22946 case 4:
22947- __asm__("lcall *(%%esi); cld\n\t"
22948+ __asm__("movw %w6, %%ds\n\t"
22949+ "lcall *%%ss:(%%esi); cld\n\t"
22950+ "push %%ss\n\t"
22951+ "pop %%ds\n\t"
22952 "jc 1f\n\t"
22953 "xor %%ah, %%ah\n"
22954 "1:"
22955@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22956 "c" (value),
22957 "b" (bx),
22958 "D" ((long)reg),
22959- "S" (&pci_indirect));
22960+ "S" (&pci_indirect),
22961+ "r" (__PCIBIOS_DS));
22962 break;
22963 }
22964
22965@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22966 * Function table for BIOS32 access
22967 */
22968
22969-static struct pci_raw_ops pci_bios_access = {
22970+static const struct pci_raw_ops pci_bios_access = {
22971 .read = pci_bios_read,
22972 .write = pci_bios_write
22973 };
22974@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22975 * Try to find PCI BIOS.
22976 */
22977
22978-static struct pci_raw_ops * __devinit pci_find_bios(void)
22979+static const struct pci_raw_ops * __devinit pci_find_bios(void)
22980 {
22981 union bios32 *check;
22982 unsigned char sum;
22983@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22984
22985 DBG("PCI: Fetching IRQ routing table... ");
22986 __asm__("push %%es\n\t"
22987+ "movw %w8, %%ds\n\t"
22988 "push %%ds\n\t"
22989 "pop %%es\n\t"
22990- "lcall *(%%esi); cld\n\t"
22991+ "lcall *%%ss:(%%esi); cld\n\t"
22992 "pop %%es\n\t"
22993+ "push %%ss\n\t"
22994+ "pop %%ds\n"
22995 "jc 1f\n\t"
22996 "xor %%ah, %%ah\n"
22997 "1:"
22998@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22999 "1" (0),
23000 "D" ((long) &opt),
23001 "S" (&pci_indirect),
23002- "m" (opt)
23003+ "m" (opt),
23004+ "r" (__PCIBIOS_DS)
23005 : "memory");
23006 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23007 if (ret & 0xff00)
23008@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23009 {
23010 int ret;
23011
23012- __asm__("lcall *(%%esi); cld\n\t"
23013+ __asm__("movw %w5, %%ds\n\t"
23014+ "lcall *%%ss:(%%esi); cld\n\t"
23015+ "push %%ss\n\t"
23016+ "pop %%ds\n"
23017 "jc 1f\n\t"
23018 "xor %%ah, %%ah\n"
23019 "1:"
23020@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23021 : "0" (PCIBIOS_SET_PCI_HW_INT),
23022 "b" ((dev->bus->number << 8) | dev->devfn),
23023 "c" ((irq << 8) | (pin + 10)),
23024- "S" (&pci_indirect));
23025+ "S" (&pci_indirect),
23026+ "r" (__PCIBIOS_DS));
23027 return !(ret & 0xff00);
23028 }
23029 EXPORT_SYMBOL(pcibios_set_irq_routing);
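In the pcbios.c rewrite above, bios32_service() now builds GDT code/data descriptors that cover only the BIOS32 image: base = address + PAGE_OFFSET, limit = length, and for large regions the limit is expressed in 4 KiB pages with the granularity bit set (the flags |= 8 passed to pack_descriptor()). A standalone sketch of just that limit/granularity decision, using the same 64 MiB threshold as the hunk; illustrative only, not part of the patch:

/* Decide how a region length becomes a segment-descriptor limit: byte
 * granular for small regions, 4 KiB-page granular (G bit) for large ones. */
#include <stdio.h>

#define PAGE_SHIFT 12

struct seg_limit {
    unsigned long limit;      /* value that goes into the descriptor */
    int g_bit;                /* 0 = byte granular, 1 = 4 KiB pages  */
};

static struct seg_limit pick_limit(unsigned long length)
{
    struct seg_limit s = { length, 0 };

    if (length >= 64UL * 1024 * 1024) {
        s.limit = length >> PAGE_SHIFT;   /* express the limit in pages */
        s.g_bit = 1;                      /* corresponds to flags |= 8  */
    }
    return s;
}

int main(void)
{
    struct seg_limit small = pick_limit(0x10000);       /* 64 KiB BIOS image */
    struct seg_limit big   = pick_limit(128UL << 20);   /* 128 MiB region    */

    printf("small: limit=%#lx G=%d\n", small.limit, small.g_bit);
    printf("big  : limit=%#lx G=%d\n", big.limit,   big.g_bit);
    return 0;
}
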
23030diff -urNp linux-2.6.32.42/arch/x86/power/cpu.c linux-2.6.32.42/arch/x86/power/cpu.c
23031--- linux-2.6.32.42/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23032+++ linux-2.6.32.42/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23033@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23034 static void fix_processor_context(void)
23035 {
23036 int cpu = smp_processor_id();
23037- struct tss_struct *t = &per_cpu(init_tss, cpu);
23038+ struct tss_struct *t = init_tss + cpu;
23039
23040 set_tss_desc(cpu, t); /*
23041 * This just modifies memory; should not be
23042@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23043 */
23044
23045 #ifdef CONFIG_X86_64
23046+ pax_open_kernel();
23047 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23048+ pax_close_kernel();
23049
23050 syscall_init(); /* This sets MSR_*STAR and related */
23051 #endif
23052diff -urNp linux-2.6.32.42/arch/x86/vdso/Makefile linux-2.6.32.42/arch/x86/vdso/Makefile
23053--- linux-2.6.32.42/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23054+++ linux-2.6.32.42/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23055@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23056 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23057 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23058
23059-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23060+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23061 GCOV_PROFILE := n
23062
23063 #
23064diff -urNp linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c
23065--- linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23066+++ linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23067@@ -22,24 +22,48 @@
23068 #include <asm/hpet.h>
23069 #include <asm/unistd.h>
23070 #include <asm/io.h>
23071+#include <asm/fixmap.h>
23072 #include "vextern.h"
23073
23074 #define gtod vdso_vsyscall_gtod_data
23075
23076+notrace noinline long __vdso_fallback_time(long *t)
23077+{
23078+ long secs;
23079+ asm volatile("syscall"
23080+ : "=a" (secs)
23081+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23082+ return secs;
23083+}
23084+
23085 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23086 {
23087 long ret;
23088 asm("syscall" : "=a" (ret) :
23089- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23090+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23091 return ret;
23092 }
23093
23094+notrace static inline cycle_t __vdso_vread_hpet(void)
23095+{
23096+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23097+}
23098+
23099+notrace static inline cycle_t __vdso_vread_tsc(void)
23100+{
23101+ cycle_t ret = (cycle_t)vget_cycles();
23102+
23103+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23104+}
23105+
23106 notrace static inline long vgetns(void)
23107 {
23108 long v;
23109- cycles_t (*vread)(void);
23110- vread = gtod->clock.vread;
23111- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23112+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23113+ v = __vdso_vread_tsc();
23114+ else
23115+ v = __vdso_vread_hpet();
23116+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23117 return (v * gtod->clock.mult) >> gtod->clock.shift;
23118 }
23119
23120@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23121
23122 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23123 {
23124- if (likely(gtod->sysctl_enabled))
23125+ if (likely(gtod->sysctl_enabled &&
23126+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23127+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23128 switch (clock) {
23129 case CLOCK_REALTIME:
23130 if (likely(gtod->clock.vread))
23131@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23132 int clock_gettime(clockid_t, struct timespec *)
23133 __attribute__((weak, alias("__vdso_clock_gettime")));
23134
23135-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23136+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23137 {
23138 long ret;
23139- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23140+ asm("syscall" : "=a" (ret) :
23141+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23142+ return ret;
23143+}
23144+
23145+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23146+{
23147+ if (likely(gtod->sysctl_enabled &&
23148+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23149+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23150+ {
23151 if (likely(tv != NULL)) {
23152 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23153 offsetof(struct timespec, tv_nsec) ||
23154@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23155 }
23156 return 0;
23157 }
23158- asm("syscall" : "=a" (ret) :
23159- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23160- return ret;
23161+ return __vdso_fallback_gettimeofday(tv, tz);
23162 }
23163 int gettimeofday(struct timeval *, struct timezone *)
23164 __attribute__((weak, alias("__vdso_gettimeofday")));
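The vclock_gettime.c changes above drop the call through gtod->clock.vread inside vgetns() and instead pick the reader by comparing the clocksource name byte-by-byte against "tsc" and "hpet" (the vDSO runs in userspace and cannot call kernel helpers such as strcmp). A standalone sketch of that comparison; the helper names here are made up:

/* Byte-wise clocksource-name test, mirroring the chained comparisons in
 * the vDSO hunk above. */
#include <stdio.h>

static int name_is_tsc(const char *n)
{
    return n[0] == 't' && n[1] == 's' && n[2] == 'c' && !n[3];
}

static int name_is_hpet(const char *n)
{
    return n[0] == 'h' && n[1] == 'p' && n[2] == 'e' && n[3] == 't' && !n[4];
}

int main(void)
{
    const char *names[] = { "tsc", "hpet", "acpi_pm" };
    for (int i = 0; i < 3; i++)
        printf("%-8s tsc=%d hpet=%d\n", names[i],
               name_is_tsc(names[i]), name_is_hpet(names[i]));
    return 0;
}
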
23165diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c
23166--- linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23167+++ linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23168@@ -25,6 +25,7 @@
23169 #include <asm/tlbflush.h>
23170 #include <asm/vdso.h>
23171 #include <asm/proto.h>
23172+#include <asm/mman.h>
23173
23174 enum {
23175 VDSO_DISABLED = 0,
23176@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23177 void enable_sep_cpu(void)
23178 {
23179 int cpu = get_cpu();
23180- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23181+ struct tss_struct *tss = init_tss + cpu;
23182
23183 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23184 put_cpu();
23185@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23186 gate_vma.vm_start = FIXADDR_USER_START;
23187 gate_vma.vm_end = FIXADDR_USER_END;
23188 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23189- gate_vma.vm_page_prot = __P101;
23190+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23191 /*
23192 * Make sure the vDSO gets into every core dump.
23193 * Dumping its contents makes post-mortem fully interpretable later
23194@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23195 if (compat)
23196 addr = VDSO_HIGH_BASE;
23197 else {
23198- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23199+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23200 if (IS_ERR_VALUE(addr)) {
23201 ret = addr;
23202 goto up_fail;
23203 }
23204 }
23205
23206- current->mm->context.vdso = (void *)addr;
23207+ current->mm->context.vdso = addr;
23208
23209 if (compat_uses_vma || !compat) {
23210 /*
23211@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23212 }
23213
23214 current_thread_info()->sysenter_return =
23215- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23216+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23217
23218 up_fail:
23219 if (ret)
23220- current->mm->context.vdso = NULL;
23221+ current->mm->context.vdso = 0;
23222
23223 up_write(&mm->mmap_sem);
23224
23225@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23226
23227 const char *arch_vma_name(struct vm_area_struct *vma)
23228 {
23229- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23230+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23231 return "[vdso]";
23232+
23233+#ifdef CONFIG_PAX_SEGMEXEC
23234+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23235+ return "[vdso]";
23236+#endif
23237+
23238 return NULL;
23239 }
23240
23241@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23242 struct mm_struct *mm = tsk->mm;
23243
23244 /* Check to see if this task was created in compat vdso mode */
23245- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23246+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23247 return &gate_vma;
23248 return NULL;
23249 }
23250diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso.lds.S linux-2.6.32.42/arch/x86/vdso/vdso.lds.S
23251--- linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23252+++ linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23253@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23254 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23255 #include "vextern.h"
23256 #undef VEXTERN
23257+
23258+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23259+VEXTERN(fallback_gettimeofday)
23260+VEXTERN(fallback_time)
23261+VEXTERN(getcpu)
23262+#undef VEXTERN
23263diff -urNp linux-2.6.32.42/arch/x86/vdso/vextern.h linux-2.6.32.42/arch/x86/vdso/vextern.h
23264--- linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23265+++ linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23266@@ -11,6 +11,5 @@
23267 put into vextern.h and be referenced as a pointer with vdso prefix.
23268 The main kernel later fills in the values. */
23269
23270-VEXTERN(jiffies)
23271 VEXTERN(vgetcpu_mode)
23272 VEXTERN(vsyscall_gtod_data)
23273diff -urNp linux-2.6.32.42/arch/x86/vdso/vma.c linux-2.6.32.42/arch/x86/vdso/vma.c
23274--- linux-2.6.32.42/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23275+++ linux-2.6.32.42/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23276@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23277 if (!vbase)
23278 goto oom;
23279
23280- if (memcmp(vbase, "\177ELF", 4)) {
23281+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
23282 printk("VDSO: I'm broken; not ELF\n");
23283 vdso_enabled = 0;
23284 }
23285@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23286 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23287 #include "vextern.h"
23288 #undef VEXTERN
23289+ vunmap(vbase);
23290 return 0;
23291
23292 oom:
23293@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23294 goto up_fail;
23295 }
23296
23297- current->mm->context.vdso = (void *)addr;
23298+ current->mm->context.vdso = addr;
23299
23300 ret = install_special_mapping(mm, addr, vdso_size,
23301 VM_READ|VM_EXEC|
23302@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23303 VM_ALWAYSDUMP,
23304 vdso_pages);
23305 if (ret) {
23306- current->mm->context.vdso = NULL;
23307+ current->mm->context.vdso = 0;
23308 goto up_fail;
23309 }
23310
23311@@ -132,10 +133,3 @@ up_fail:
23312 up_write(&mm->mmap_sem);
23313 return ret;
23314 }
23315-
23316-static __init int vdso_setup(char *s)
23317-{
23318- vdso_enabled = simple_strtoul(s, NULL, 0);
23319- return 0;
23320-}
23321-__setup("vdso=", vdso_setup);
23322diff -urNp linux-2.6.32.42/arch/x86/xen/enlighten.c linux-2.6.32.42/arch/x86/xen/enlighten.c
23323--- linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23324+++ linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23325@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23326
23327 struct shared_info xen_dummy_shared_info;
23328
23329-void *xen_initial_gdt;
23330-
23331 /*
23332 * Point at some empty memory to start with. We map the real shared_info
23333 * page as soon as fixmap is up and running.
23334@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23335
23336 preempt_disable();
23337
23338- start = __get_cpu_var(idt_desc).address;
23339+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23340 end = start + __get_cpu_var(idt_desc).size + 1;
23341
23342 xen_mc_flush();
23343@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23344 #endif
23345 };
23346
23347-static void xen_reboot(int reason)
23348+static __noreturn void xen_reboot(int reason)
23349 {
23350 struct sched_shutdown r = { .reason = reason };
23351
23352@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23353 BUG();
23354 }
23355
23356-static void xen_restart(char *msg)
23357+static __noreturn void xen_restart(char *msg)
23358 {
23359 xen_reboot(SHUTDOWN_reboot);
23360 }
23361
23362-static void xen_emergency_restart(void)
23363+static __noreturn void xen_emergency_restart(void)
23364 {
23365 xen_reboot(SHUTDOWN_reboot);
23366 }
23367
23368-static void xen_machine_halt(void)
23369+static __noreturn void xen_machine_halt(void)
23370 {
23371 xen_reboot(SHUTDOWN_poweroff);
23372 }
23373@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23374 */
23375 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23376
23377-#ifdef CONFIG_X86_64
23378 /* Work out if we support NX */
23379- check_efer();
23380+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23381+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23382+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23383+ unsigned l, h;
23384+
23385+#ifdef CONFIG_X86_PAE
23386+ nx_enabled = 1;
23387+#endif
23388+ __supported_pte_mask |= _PAGE_NX;
23389+ rdmsr(MSR_EFER, l, h);
23390+ l |= EFER_NX;
23391+ wrmsr(MSR_EFER, l, h);
23392+ }
23393 #endif
23394
23395 xen_setup_features();
23396@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23397
23398 machine_ops = xen_machine_ops;
23399
23400- /*
23401- * The only reliable way to retain the initial address of the
23402- * percpu gdt_page is to remember it here, so we can go and
23403- * mark it RW later, when the initial percpu area is freed.
23404- */
23405- xen_initial_gdt = &per_cpu(gdt_page, 0);
23406-
23407 xen_smp_init();
23408
23409 pgd = (pgd_t *)xen_start_info->pt_base;
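The xen_start_kernel() hunk above replaces the check_efer() call with an explicit feature probe: CPUID leaf 0x80000001 must exist and report NX (EDX bit 20, which is what X86_FEATURE_NX & 31 evaluates to) before _PAGE_NX and EFER.NX are set. A standalone userspace sketch of the detection half, assuming GCC/Clang's <cpuid.h>; the rdmsr/wrmsr of EFER done by the patch is kernel-only:

/* Detect NX/XD support the same way the hunk above does: extended CPUID
 * leaf 0x80000001, EDX bit 20. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000001) {
        puts("no extended CPUID leaves");
        return 0;
    }
    __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
    printf("NX/XD supported: %s\n", (edx & (1u << 20)) ? "yes" : "no");
    return 0;
}
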
23410diff -urNp linux-2.6.32.42/arch/x86/xen/mmu.c linux-2.6.32.42/arch/x86/xen/mmu.c
23411--- linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:55:34.000000000 -0400
23412+++ linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:56:37.000000000 -0400
23413@@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23414 convert_pfn_mfn(init_level4_pgt);
23415 convert_pfn_mfn(level3_ident_pgt);
23416 convert_pfn_mfn(level3_kernel_pgt);
23417+ convert_pfn_mfn(level3_vmalloc_pgt);
23418+ convert_pfn_mfn(level3_vmemmap_pgt);
23419
23420 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23421 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23422@@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23423 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23424 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23425 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23426+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23427+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23428 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23429+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23430 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23431 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23432
23433diff -urNp linux-2.6.32.42/arch/x86/xen/smp.c linux-2.6.32.42/arch/x86/xen/smp.c
23434--- linux-2.6.32.42/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23435+++ linux-2.6.32.42/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23436@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23437 {
23438 BUG_ON(smp_processor_id() != 0);
23439 native_smp_prepare_boot_cpu();
23440-
23441- /* We've switched to the "real" per-cpu gdt, so make sure the
23442- old memory can be recycled */
23443- make_lowmem_page_readwrite(xen_initial_gdt);
23444-
23445 xen_setup_vcpu_info_placement();
23446 }
23447
23448@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23449 gdt = get_cpu_gdt_table(cpu);
23450
23451 ctxt->flags = VGCF_IN_KERNEL;
23452- ctxt->user_regs.ds = __USER_DS;
23453- ctxt->user_regs.es = __USER_DS;
23454+ ctxt->user_regs.ds = __KERNEL_DS;
23455+ ctxt->user_regs.es = __KERNEL_DS;
23456 ctxt->user_regs.ss = __KERNEL_DS;
23457 #ifdef CONFIG_X86_32
23458 ctxt->user_regs.fs = __KERNEL_PERCPU;
23459- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23460+ savesegment(gs, ctxt->user_regs.gs);
23461 #else
23462 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23463 #endif
23464@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23465 int rc;
23466
23467 per_cpu(current_task, cpu) = idle;
23468+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23469 #ifdef CONFIG_X86_32
23470 irq_ctx_init(cpu);
23471 #else
23472 clear_tsk_thread_flag(idle, TIF_FORK);
23473- per_cpu(kernel_stack, cpu) =
23474- (unsigned long)task_stack_page(idle) -
23475- KERNEL_STACK_OFFSET + THREAD_SIZE;
23476+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23477 #endif
23478 xen_setup_runstate_info(cpu);
23479 xen_setup_timer(cpu);
23480diff -urNp linux-2.6.32.42/arch/x86/xen/xen-asm_32.S linux-2.6.32.42/arch/x86/xen/xen-asm_32.S
23481--- linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23482+++ linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23483@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23484 ESP_OFFSET=4 # bytes pushed onto stack
23485
23486 /*
23487- * Store vcpu_info pointer for easy access. Do it this way to
23488- * avoid having to reload %fs
23489+ * Store vcpu_info pointer for easy access.
23490 */
23491 #ifdef CONFIG_SMP
23492- GET_THREAD_INFO(%eax)
23493- movl TI_cpu(%eax), %eax
23494- movl __per_cpu_offset(,%eax,4), %eax
23495- mov per_cpu__xen_vcpu(%eax), %eax
23496+ push %fs
23497+ mov $(__KERNEL_PERCPU), %eax
23498+ mov %eax, %fs
23499+ mov PER_CPU_VAR(xen_vcpu), %eax
23500+ pop %fs
23501 #else
23502 movl per_cpu__xen_vcpu, %eax
23503 #endif
23504diff -urNp linux-2.6.32.42/arch/x86/xen/xen-head.S linux-2.6.32.42/arch/x86/xen/xen-head.S
23505--- linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23506+++ linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23507@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23508 #ifdef CONFIG_X86_32
23509 mov %esi,xen_start_info
23510 mov $init_thread_union+THREAD_SIZE,%esp
23511+#ifdef CONFIG_SMP
23512+ movl $cpu_gdt_table,%edi
23513+ movl $__per_cpu_load,%eax
23514+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23515+ rorl $16,%eax
23516+ movb %al,__KERNEL_PERCPU + 4(%edi)
23517+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23518+ movl $__per_cpu_end - 1,%eax
23519+ subl $__per_cpu_start,%eax
23520+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23521+#endif
23522 #else
23523 mov %rsi,xen_start_info
23524 mov $init_thread_union+THREAD_SIZE,%rsp
23525diff -urNp linux-2.6.32.42/arch/x86/xen/xen-ops.h linux-2.6.32.42/arch/x86/xen/xen-ops.h
23526--- linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23527+++ linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23528@@ -10,8 +10,6 @@
23529 extern const char xen_hypervisor_callback[];
23530 extern const char xen_failsafe_callback[];
23531
23532-extern void *xen_initial_gdt;
23533-
23534 struct trap_info;
23535 void xen_copy_trap_info(struct trap_info *traps);
23536
23537diff -urNp linux-2.6.32.42/block/blk-integrity.c linux-2.6.32.42/block/blk-integrity.c
23538--- linux-2.6.32.42/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23539+++ linux-2.6.32.42/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23540@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23541 NULL,
23542 };
23543
23544-static struct sysfs_ops integrity_ops = {
23545+static const struct sysfs_ops integrity_ops = {
23546 .show = &integrity_attr_show,
23547 .store = &integrity_attr_store,
23548 };
23549diff -urNp linux-2.6.32.42/block/blk-iopoll.c linux-2.6.32.42/block/blk-iopoll.c
23550--- linux-2.6.32.42/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23551+++ linux-2.6.32.42/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23552@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23553 }
23554 EXPORT_SYMBOL(blk_iopoll_complete);
23555
23556-static void blk_iopoll_softirq(struct softirq_action *h)
23557+static void blk_iopoll_softirq(void)
23558 {
23559 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23560 int rearm = 0, budget = blk_iopoll_budget;
23561diff -urNp linux-2.6.32.42/block/blk-map.c linux-2.6.32.42/block/blk-map.c
23562--- linux-2.6.32.42/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23563+++ linux-2.6.32.42/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23564@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23565 * direct dma. else, set up kernel bounce buffers
23566 */
23567 uaddr = (unsigned long) ubuf;
23568- if (blk_rq_aligned(q, ubuf, len) && !map_data)
23569+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23570 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23571 else
23572 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23573@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23574 for (i = 0; i < iov_count; i++) {
23575 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23576
23577+ if (!iov[i].iov_len)
23578+ return -EINVAL;
23579+
23580 if (uaddr & queue_dma_alignment(q)) {
23581 unaligned = 1;
23582 break;
23583 }
23584- if (!iov[i].iov_len)
23585- return -EINVAL;
23586 }
23587
23588 if (unaligned || (q->dma_pad_mask & len) || map_data)
23589@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23590 if (!len || !kbuf)
23591 return -EINVAL;
23592
23593- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23594+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23595 if (do_copy)
23596 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23597 else
23598diff -urNp linux-2.6.32.42/block/blk-softirq.c linux-2.6.32.42/block/blk-softirq.c
23599--- linux-2.6.32.42/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23600+++ linux-2.6.32.42/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23601@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23602 * Softirq action handler - move entries to local list and loop over them
23603 * while passing them to the queue registered handler.
23604 */
23605-static void blk_done_softirq(struct softirq_action *h)
23606+static void blk_done_softirq(void)
23607 {
23608 struct list_head *cpu_list, local_list;
23609
23610diff -urNp linux-2.6.32.42/block/blk-sysfs.c linux-2.6.32.42/block/blk-sysfs.c
23611--- linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23612+++ linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23613@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23614 kmem_cache_free(blk_requestq_cachep, q);
23615 }
23616
23617-static struct sysfs_ops queue_sysfs_ops = {
23618+static const struct sysfs_ops queue_sysfs_ops = {
23619 .show = queue_attr_show,
23620 .store = queue_attr_store,
23621 };
23622diff -urNp linux-2.6.32.42/block/bsg.c linux-2.6.32.42/block/bsg.c
23623--- linux-2.6.32.42/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23624+++ linux-2.6.32.42/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23625@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23626 struct sg_io_v4 *hdr, struct bsg_device *bd,
23627 fmode_t has_write_perm)
23628 {
23629+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23630+ unsigned char *cmdptr;
23631+
23632 if (hdr->request_len > BLK_MAX_CDB) {
23633 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23634 if (!rq->cmd)
23635 return -ENOMEM;
23636- }
23637+ cmdptr = rq->cmd;
23638+ } else
23639+ cmdptr = tmpcmd;
23640
23641- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23642+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23643 hdr->request_len))
23644 return -EFAULT;
23645
23646+ if (cmdptr != rq->cmd)
23647+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23648+
23649 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23650 if (blk_verify_command(rq->cmd, has_write_perm))
23651 return -EPERM;
23652diff -urNp linux-2.6.32.42/block/elevator.c linux-2.6.32.42/block/elevator.c
23653--- linux-2.6.32.42/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23654+++ linux-2.6.32.42/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23655@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23656 return error;
23657 }
23658
23659-static struct sysfs_ops elv_sysfs_ops = {
23660+static const struct sysfs_ops elv_sysfs_ops = {
23661 .show = elv_attr_show,
23662 .store = elv_attr_store,
23663 };
23664diff -urNp linux-2.6.32.42/block/scsi_ioctl.c linux-2.6.32.42/block/scsi_ioctl.c
23665--- linux-2.6.32.42/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23666+++ linux-2.6.32.42/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23667@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23668 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23669 struct sg_io_hdr *hdr, fmode_t mode)
23670 {
23671- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23672+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23673+ unsigned char *cmdptr;
23674+
23675+ if (rq->cmd != rq->__cmd)
23676+ cmdptr = rq->cmd;
23677+ else
23678+ cmdptr = tmpcmd;
23679+
23680+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23681 return -EFAULT;
23682+
23683+ if (cmdptr != rq->cmd)
23684+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23685+
23686 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23687 return -EPERM;
23688
23689@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23690 int err;
23691 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23692 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23693+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23694+ unsigned char *cmdptr;
23695
23696 if (!sic)
23697 return -EINVAL;
23698@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23699 */
23700 err = -EFAULT;
23701 rq->cmd_len = cmdlen;
23702- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23703+
23704+ if (rq->cmd != rq->__cmd)
23705+ cmdptr = rq->cmd;
23706+ else
23707+ cmdptr = tmpcmd;
23708+
23709+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23710 goto error;
23711
23712+ if (rq->cmd != cmdptr)
23713+ memcpy(rq->cmd, cmdptr, cmdlen);
23714+
23715 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23716 goto error;
23717
23718diff -urNp linux-2.6.32.42/crypto/gf128mul.c linux-2.6.32.42/crypto/gf128mul.c
23719--- linux-2.6.32.42/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23720+++ linux-2.6.32.42/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23721@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23722 for (i = 0; i < 7; ++i)
23723 gf128mul_x_lle(&p[i + 1], &p[i]);
23724
23725- memset(r, 0, sizeof(r));
23726+ memset(r, 0, sizeof(*r));
23727 for (i = 0;;) {
23728 u8 ch = ((u8 *)b)[15 - i];
23729
23730@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23731 for (i = 0; i < 7; ++i)
23732 gf128mul_x_bbe(&p[i + 1], &p[i]);
23733
23734- memset(r, 0, sizeof(r));
23735+ memset(r, 0, sizeof(*r));
23736 for (i = 0;;) {
23737 u8 ch = ((u8 *)b)[i];
23738
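The gf128mul.c hunk above is a plain bug fix: r is a be128 pointer, so sizeof(r) is the pointer size and the memset() cleared only 4 or 8 of the 16 bytes; sizeof(*r) clears the whole element. A minimal standalone demonstration:

/* sizeof(r) vs sizeof(*r) on a pointer: only the fix zeroes all 16 bytes. */
#include <stdio.h>
#include <string.h>

struct be128 { unsigned long long a, b; };   /* 16 bytes, like crypto's be128 */

int main(void)
{
    struct be128 block = { ~0ULL, ~0ULL };
    struct be128 *r = &block;

    memset(r, 0, sizeof(r));     /* bug: clears only sizeof(void *) bytes */
    printf("buggy: a=%llx b=%llx\n", r->a, r->b);

    block.a = block.b = ~0ULL;
    memset(r, 0, sizeof(*r));    /* fix: clears the whole 16-byte element */
    printf("fixed: a=%llx b=%llx\n", r->a, r->b);
    return 0;
}
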
23739diff -urNp linux-2.6.32.42/crypto/serpent.c linux-2.6.32.42/crypto/serpent.c
23740--- linux-2.6.32.42/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23741+++ linux-2.6.32.42/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23742@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23743 u32 r0,r1,r2,r3,r4;
23744 int i;
23745
23746+ pax_track_stack();
23747+
23748 /* Copy key, add padding */
23749
23750 for (i = 0; i < keylen; ++i)
23751diff -urNp linux-2.6.32.42/Documentation/dontdiff linux-2.6.32.42/Documentation/dontdiff
23752--- linux-2.6.32.42/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23753+++ linux-2.6.32.42/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23754@@ -1,13 +1,16 @@
23755 *.a
23756 *.aux
23757 *.bin
23758+*.cis
23759 *.cpio
23760 *.csp
23761+*.dbg
23762 *.dsp
23763 *.dvi
23764 *.elf
23765 *.eps
23766 *.fw
23767+*.gcno
23768 *.gen.S
23769 *.gif
23770 *.grep
23771@@ -38,8 +41,10 @@
23772 *.tab.h
23773 *.tex
23774 *.ver
23775+*.vim
23776 *.xml
23777 *_MODULES
23778+*_reg_safe.h
23779 *_vga16.c
23780 *~
23781 *.9
23782@@ -49,11 +54,16 @@
23783 53c700_d.h
23784 CVS
23785 ChangeSet
23786+GPATH
23787+GRTAGS
23788+GSYMS
23789+GTAGS
23790 Image
23791 Kerntypes
23792 Module.markers
23793 Module.symvers
23794 PENDING
23795+PERF*
23796 SCCS
23797 System.map*
23798 TAGS
23799@@ -76,7 +86,11 @@ btfixupprep
23800 build
23801 bvmlinux
23802 bzImage*
23803+capability_names.h
23804+capflags.c
23805 classlist.h*
23806+clut_vga16.c
23807+common-cmds.h
23808 comp*.log
23809 compile.h*
23810 conf
23811@@ -103,13 +117,14 @@ gen_crc32table
23812 gen_init_cpio
23813 genksyms
23814 *_gray256.c
23815+hash
23816 ihex2fw
23817 ikconfig.h*
23818 initramfs_data.cpio
23819+initramfs_data.cpio.bz2
23820 initramfs_data.cpio.gz
23821 initramfs_list
23822 kallsyms
23823-kconfig
23824 keywords.c
23825 ksym.c*
23826 ksym.h*
23827@@ -133,7 +148,9 @@ mkboot
23828 mkbugboot
23829 mkcpustr
23830 mkdep
23831+mkpiggy
23832 mkprep
23833+mkregtable
23834 mktables
23835 mktree
23836 modpost
23837@@ -149,6 +166,7 @@ patches*
23838 pca200e.bin
23839 pca200e_ecd.bin2
23840 piggy.gz
23841+piggy.S
23842 piggyback
23843 pnmtologo
23844 ppc_defs.h*
23845@@ -157,12 +175,15 @@ qconf
23846 raid6altivec*.c
23847 raid6int*.c
23848 raid6tables.c
23849+regdb.c
23850 relocs
23851+rlim_names.h
23852 series
23853 setup
23854 setup.bin
23855 setup.elf
23856 sImage
23857+slabinfo
23858 sm_tbl*
23859 split-include
23860 syscalltab.h
23861@@ -186,14 +207,20 @@ version.h*
23862 vmlinux
23863 vmlinux-*
23864 vmlinux.aout
23865+vmlinux.bin.all
23866+vmlinux.bin.bz2
23867 vmlinux.lds
23868+vmlinux.relocs
23869+voffset.h
23870 vsyscall.lds
23871 vsyscall_32.lds
23872 wanxlfw.inc
23873 uImage
23874 unifdef
23875+utsrelease.h
23876 wakeup.bin
23877 wakeup.elf
23878 wakeup.lds
23879 zImage*
23880 zconf.hash.c
23881+zoffset.h
23882diff -urNp linux-2.6.32.42/Documentation/kernel-parameters.txt linux-2.6.32.42/Documentation/kernel-parameters.txt
23883--- linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23884+++ linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23885@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23886 the specified number of seconds. This is to be used if
23887 your oopses keep scrolling off the screen.
23888
23889+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23890+ virtualization environments that don't cope well with the
23891+ expand down segment used by UDEREF on X86-32 or the frequent
23892+ page table updates on X86-64.
23893+
23894+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23895+
23896 pcbit= [HW,ISDN]
23897
23898 pcd. [PARIDE]
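For illustration only (not part of the patch; the image name and root device below are placeholders), the two PaX options documented in the kernel-parameters.txt hunk above are passed on the kernel command line like any other boot parameter:

	kernel /boot/vmlinuz-2.6.32.42-grsec ro root=<root-device> pax_softmode=1 pax_nouderef

Here pax_softmode=1 boots PaX in soft mode and pax_nouderef disables UDEREF, as described in the added documentation.
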
23899diff -urNp linux-2.6.32.42/drivers/acpi/acpi_pad.c linux-2.6.32.42/drivers/acpi/acpi_pad.c
23900--- linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23901+++ linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23902@@ -30,7 +30,7 @@
23903 #include <acpi/acpi_bus.h>
23904 #include <acpi/acpi_drivers.h>
23905
23906-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23907+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23908 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23909 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23910 static DEFINE_MUTEX(isolated_cpus_lock);
23911diff -urNp linux-2.6.32.42/drivers/acpi/battery.c linux-2.6.32.42/drivers/acpi/battery.c
23912--- linux-2.6.32.42/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23913+++ linux-2.6.32.42/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23914@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23915 }
23916
23917 static struct battery_file {
23918- struct file_operations ops;
23919+ const struct file_operations ops;
23920 mode_t mode;
23921 const char *name;
23922 } acpi_battery_file[] = {
23923diff -urNp linux-2.6.32.42/drivers/acpi/dock.c linux-2.6.32.42/drivers/acpi/dock.c
23924--- linux-2.6.32.42/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23925+++ linux-2.6.32.42/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23926@@ -77,7 +77,7 @@ struct dock_dependent_device {
23927 struct list_head list;
23928 struct list_head hotplug_list;
23929 acpi_handle handle;
23930- struct acpi_dock_ops *ops;
23931+ const struct acpi_dock_ops *ops;
23932 void *context;
23933 };
23934
23935@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23936 * the dock driver after _DCK is executed.
23937 */
23938 int
23939-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23940+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23941 void *context)
23942 {
23943 struct dock_dependent_device *dd;
23944diff -urNp linux-2.6.32.42/drivers/acpi/osl.c linux-2.6.32.42/drivers/acpi/osl.c
23945--- linux-2.6.32.42/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23946+++ linux-2.6.32.42/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23947@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23948 void __iomem *virt_addr;
23949
23950 virt_addr = ioremap(phys_addr, width);
23951+ if (!virt_addr)
23952+ return AE_NO_MEMORY;
23953 if (!value)
23954 value = &dummy;
23955
23956@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23957 void __iomem *virt_addr;
23958
23959 virt_addr = ioremap(phys_addr, width);
23960+ if (!virt_addr)
23961+ return AE_NO_MEMORY;
23962
23963 switch (width) {
23964 case 8:
23965diff -urNp linux-2.6.32.42/drivers/acpi/power_meter.c linux-2.6.32.42/drivers/acpi/power_meter.c
23966--- linux-2.6.32.42/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23967+++ linux-2.6.32.42/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23968@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23969 return res;
23970
23971 temp /= 1000;
23972- if (temp < 0)
23973- return -EINVAL;
23974
23975 mutex_lock(&resource->lock);
23976 resource->trip[attr->index - 7] = temp;
23977diff -urNp linux-2.6.32.42/drivers/acpi/proc.c linux-2.6.32.42/drivers/acpi/proc.c
23978--- linux-2.6.32.42/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23979+++ linux-2.6.32.42/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23980@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23981 size_t count, loff_t * ppos)
23982 {
23983 struct list_head *node, *next;
23984- char strbuf[5];
23985- char str[5] = "";
23986- unsigned int len = count;
23987+ char strbuf[5] = {0};
23988 struct acpi_device *found_dev = NULL;
23989
23990- if (len > 4)
23991- len = 4;
23992- if (len < 0)
23993- return -EFAULT;
23994+ if (count > 4)
23995+ count = 4;
23996
23997- if (copy_from_user(strbuf, buffer, len))
23998+ if (copy_from_user(strbuf, buffer, count))
23999 return -EFAULT;
24000- strbuf[len] = '\0';
24001- sscanf(strbuf, "%s", str);
24002+ strbuf[count] = '\0';
24003
24004 mutex_lock(&acpi_device_lock);
24005 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24006@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24007 if (!dev->wakeup.flags.valid)
24008 continue;
24009
24010- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24011+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24012 dev->wakeup.state.enabled =
24013 dev->wakeup.state.enabled ? 0 : 1;
24014 found_dev = dev;
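The rewritten acpi_system_write_wakeup_device() above clamps the copy length to 4 before copying, drops the signed-length check and the redundant sscanf(), and NUL-terminates the buffer it then hands to strncmp(). A standalone sketch of the same clamp/copy/terminate shape; memcpy stands in for copy_from_user() and the helper name is made up:

#include <stdio.h>
#include <string.h>

/* Copy at most 4 bytes of user input and always produce a NUL-terminated token. */
static void read_bus_id(char *dst5, const char *user_buf, size_t count)
{
    char strbuf[5] = {0};

    if (count > 4)
        count = 4;                    /* clamp before copying */
    memcpy(strbuf, user_buf, count);  /* copy_from_user() in the kernel */
    strbuf[count] = '\0';             /* explicit termination */
    memcpy(dst5, strbuf, 5);
}

int main(void)
{
    char id[5];

    read_bus_id(id, "PNP0C0A\n", 8);  /* oversized input gets truncated */
    printf("token: \"%s\"\n", id);    /* -> "PNP0" */
    return 0;
}
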
24015diff -urNp linux-2.6.32.42/drivers/acpi/processor_core.c linux-2.6.32.42/drivers/acpi/processor_core.c
24016--- linux-2.6.32.42/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24017+++ linux-2.6.32.42/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24018@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24019 return 0;
24020 }
24021
24022- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24023+ BUG_ON(pr->id >= nr_cpu_ids);
24024
24025 /*
24026 * Buggy BIOS check
24027diff -urNp linux-2.6.32.42/drivers/acpi/sbshc.c linux-2.6.32.42/drivers/acpi/sbshc.c
24028--- linux-2.6.32.42/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24029+++ linux-2.6.32.42/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24030@@ -17,7 +17,7 @@
24031
24032 #define PREFIX "ACPI: "
24033
24034-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24035+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24036 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24037
24038 struct acpi_smb_hc {
24039diff -urNp linux-2.6.32.42/drivers/acpi/sleep.c linux-2.6.32.42/drivers/acpi/sleep.c
24040--- linux-2.6.32.42/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24041+++ linux-2.6.32.42/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24042@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24043 }
24044 }
24045
24046-static struct platform_suspend_ops acpi_suspend_ops = {
24047+static const struct platform_suspend_ops acpi_suspend_ops = {
24048 .valid = acpi_suspend_state_valid,
24049 .begin = acpi_suspend_begin,
24050 .prepare_late = acpi_pm_prepare,
24051@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24052 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24053 * been requested.
24054 */
24055-static struct platform_suspend_ops acpi_suspend_ops_old = {
24056+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24057 .valid = acpi_suspend_state_valid,
24058 .begin = acpi_suspend_begin_old,
24059 .prepare_late = acpi_pm_disable_gpes,
24060@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24061 acpi_enable_all_runtime_gpes();
24062 }
24063
24064-static struct platform_hibernation_ops acpi_hibernation_ops = {
24065+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24066 .begin = acpi_hibernation_begin,
24067 .end = acpi_pm_end,
24068 .pre_snapshot = acpi_hibernation_pre_snapshot,
24069@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24070 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24071 * been requested.
24072 */
24073-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24074+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24075 .begin = acpi_hibernation_begin_old,
24076 .end = acpi_pm_end,
24077 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24078diff -urNp linux-2.6.32.42/drivers/acpi/video.c linux-2.6.32.42/drivers/acpi/video.c
24079--- linux-2.6.32.42/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24080+++ linux-2.6.32.42/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24081@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24082 vd->brightness->levels[request_level]);
24083 }
24084
24085-static struct backlight_ops acpi_backlight_ops = {
24086+static const struct backlight_ops acpi_backlight_ops = {
24087 .get_brightness = acpi_video_get_brightness,
24088 .update_status = acpi_video_set_brightness,
24089 };
24090diff -urNp linux-2.6.32.42/drivers/ata/ahci.c linux-2.6.32.42/drivers/ata/ahci.c
24091--- linux-2.6.32.42/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24092+++ linux-2.6.32.42/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24093@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24094 .sdev_attrs = ahci_sdev_attrs,
24095 };
24096
24097-static struct ata_port_operations ahci_ops = {
24098+static const struct ata_port_operations ahci_ops = {
24099 .inherits = &sata_pmp_port_ops,
24100
24101 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24102@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24103 .port_stop = ahci_port_stop,
24104 };
24105
24106-static struct ata_port_operations ahci_vt8251_ops = {
24107+static const struct ata_port_operations ahci_vt8251_ops = {
24108 .inherits = &ahci_ops,
24109 .hardreset = ahci_vt8251_hardreset,
24110 };
24111
24112-static struct ata_port_operations ahci_p5wdh_ops = {
24113+static const struct ata_port_operations ahci_p5wdh_ops = {
24114 .inherits = &ahci_ops,
24115 .hardreset = ahci_p5wdh_hardreset,
24116 };
24117
24118-static struct ata_port_operations ahci_sb600_ops = {
24119+static const struct ata_port_operations ahci_sb600_ops = {
24120 .inherits = &ahci_ops,
24121 .softreset = ahci_sb600_softreset,
24122 .pmp_softreset = ahci_sb600_softreset,
24123diff -urNp linux-2.6.32.42/drivers/ata/ata_generic.c linux-2.6.32.42/drivers/ata/ata_generic.c
24124--- linux-2.6.32.42/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24125+++ linux-2.6.32.42/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24126@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24127 ATA_BMDMA_SHT(DRV_NAME),
24128 };
24129
24130-static struct ata_port_operations generic_port_ops = {
24131+static const struct ata_port_operations generic_port_ops = {
24132 .inherits = &ata_bmdma_port_ops,
24133 .cable_detect = ata_cable_unknown,
24134 .set_mode = generic_set_mode,
24135diff -urNp linux-2.6.32.42/drivers/ata/ata_piix.c linux-2.6.32.42/drivers/ata/ata_piix.c
24136--- linux-2.6.32.42/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24137+++ linux-2.6.32.42/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24138@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24139 ATA_BMDMA_SHT(DRV_NAME),
24140 };
24141
24142-static struct ata_port_operations piix_pata_ops = {
24143+static const struct ata_port_operations piix_pata_ops = {
24144 .inherits = &ata_bmdma32_port_ops,
24145 .cable_detect = ata_cable_40wire,
24146 .set_piomode = piix_set_piomode,
24147@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24148 .prereset = piix_pata_prereset,
24149 };
24150
24151-static struct ata_port_operations piix_vmw_ops = {
24152+static const struct ata_port_operations piix_vmw_ops = {
24153 .inherits = &piix_pata_ops,
24154 .bmdma_status = piix_vmw_bmdma_status,
24155 };
24156
24157-static struct ata_port_operations ich_pata_ops = {
24158+static const struct ata_port_operations ich_pata_ops = {
24159 .inherits = &piix_pata_ops,
24160 .cable_detect = ich_pata_cable_detect,
24161 .set_dmamode = ich_set_dmamode,
24162 };
24163
24164-static struct ata_port_operations piix_sata_ops = {
24165+static const struct ata_port_operations piix_sata_ops = {
24166 .inherits = &ata_bmdma_port_ops,
24167 };
24168
24169-static struct ata_port_operations piix_sidpr_sata_ops = {
24170+static const struct ata_port_operations piix_sidpr_sata_ops = {
24171 .inherits = &piix_sata_ops,
24172 .hardreset = sata_std_hardreset,
24173 .scr_read = piix_sidpr_scr_read,
24174diff -urNp linux-2.6.32.42/drivers/ata/libata-acpi.c linux-2.6.32.42/drivers/ata/libata-acpi.c
24175--- linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24176+++ linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24177@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24178 ata_acpi_uevent(dev->link->ap, dev, event);
24179 }
24180
24181-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24182+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24183 .handler = ata_acpi_dev_notify_dock,
24184 .uevent = ata_acpi_dev_uevent,
24185 };
24186
24187-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24188+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24189 .handler = ata_acpi_ap_notify_dock,
24190 .uevent = ata_acpi_ap_uevent,
24191 };
24192diff -urNp linux-2.6.32.42/drivers/ata/libata-core.c linux-2.6.32.42/drivers/ata/libata-core.c
24193--- linux-2.6.32.42/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24194+++ linux-2.6.32.42/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
24195@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24196 struct ata_port *ap;
24197 unsigned int tag;
24198
24199- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24200+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24201 ap = qc->ap;
24202
24203 qc->flags = 0;
24204@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24205 struct ata_port *ap;
24206 struct ata_link *link;
24207
24208- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24209+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24210 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24211 ap = qc->ap;
24212 link = qc->dev->link;
24213@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24214 * LOCKING:
24215 * None.
24216 */
24217-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24218+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24219 {
24220 static DEFINE_SPINLOCK(lock);
24221 const struct ata_port_operations *cur;
24222@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24223 return;
24224
24225 spin_lock(&lock);
24226+ pax_open_kernel();
24227
24228 for (cur = ops->inherits; cur; cur = cur->inherits) {
24229 void **inherit = (void **)cur;
24230@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24231 if (IS_ERR(*pp))
24232 *pp = NULL;
24233
24234- ops->inherits = NULL;
24235+ ((struct ata_port_operations *)ops)->inherits = NULL;
24236
24237+ pax_close_kernel();
24238 spin_unlock(&lock);
24239 }
24240
24241@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24242 */
24243 /* KILLME - the only user left is ipr */
24244 void ata_host_init(struct ata_host *host, struct device *dev,
24245- unsigned long flags, struct ata_port_operations *ops)
24246+ unsigned long flags, const struct ata_port_operations *ops)
24247 {
24248 spin_lock_init(&host->lock);
24249 host->dev = dev;
24250@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24251 /* truly dummy */
24252 }
24253
24254-struct ata_port_operations ata_dummy_port_ops = {
24255+const struct ata_port_operations ata_dummy_port_ops = {
24256 .qc_prep = ata_noop_qc_prep,
24257 .qc_issue = ata_dummy_qc_issue,
24258 .error_handler = ata_dummy_error_handler,
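
Aside (not part of the patch): the libata-core.c hunk above is the core of this group of changes — the port-operations tables become const, and the one remaining late write (clearing ops->inherits in ata_finalize_port_ops) is routed through an explicit cast inside a pax_open_kernel()/pax_close_kernel() window. The minimal user-space sketch below illustrates that cast-inside-a-window pattern only; the struct and function names are invented, and the two macros are inert stubs standing in for the real PaX primitives, which in the actual patch lift and restore kernel write protection.

#include <stddef.h>

/* Stubs for illustration: the real pax_open_kernel()/pax_close_kernel()
 * temporarily allow writes to otherwise read-only kernel data. */
#define pax_open_kernel()   do { } while (0)
#define pax_close_kernel()  do { } while (0)

struct example_ops {
	const struct example_ops *inherits;
	int (*issue)(void);
};

static const struct example_ops base_ops = { 0 };	/* parent table, read-only */

static struct example_ops my_ops = {			/* kept writable for this demo */
	.inherits = &base_ops,
};

static void finalize_ops(const struct example_ops *ops)
{
	pax_open_kernel();
	/* the only write: drop const for this single assignment, as the
	 * hunk above does for ops->inherits */
	((struct example_ops *)ops)->inherits = NULL;
	pax_close_kernel();
}

int main(void)
{
	finalize_ops(&my_ops);		/* well-defined here: my_ops itself is not const */
	return my_ops.inherits == NULL ? 0 : 1;
}

In the kernel the tables really are const and end up in read-only data; the write only succeeds because the PaX primitives open a protection window around it, which is what the stubs above deliberately do not model.
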
24259diff -urNp linux-2.6.32.42/drivers/ata/libata-eh.c linux-2.6.32.42/drivers/ata/libata-eh.c
24260--- linux-2.6.32.42/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
24261+++ linux-2.6.32.42/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
24262@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24263 {
24264 struct ata_link *link;
24265
24266+ pax_track_stack();
24267+
24268 ata_for_each_link(link, ap, HOST_FIRST)
24269 ata_eh_link_report(link);
24270 }
24271@@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24272 */
24273 void ata_std_error_handler(struct ata_port *ap)
24274 {
24275- struct ata_port_operations *ops = ap->ops;
24276+ const struct ata_port_operations *ops = ap->ops;
24277 ata_reset_fn_t hardreset = ops->hardreset;
24278
24279 /* ignore built-in hardreset if SCR access is not available */
24280diff -urNp linux-2.6.32.42/drivers/ata/libata-pmp.c linux-2.6.32.42/drivers/ata/libata-pmp.c
24281--- linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24282+++ linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24283@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24284 */
24285 static int sata_pmp_eh_recover(struct ata_port *ap)
24286 {
24287- struct ata_port_operations *ops = ap->ops;
24288+ const struct ata_port_operations *ops = ap->ops;
24289 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24290 struct ata_link *pmp_link = &ap->link;
24291 struct ata_device *pmp_dev = pmp_link->device;
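
Aside (not part of the patch): the libata-eh.c and libata-pmp.c hunks above show the consumer side of the constification — the local ap->ops pointers become pointer-to-const so the now read-only tables can be used without casts. The short sketch below, with invented names, illustrates that const-correct caller pattern.

struct demo_port_ops {
	void (*error_handler)(void);
};

struct demo_port {
	const struct demo_port_ops *ops;	/* points into read-only data */
};

static void demo_error_handler(void) { }

static const struct demo_port_ops demo_ops = {
	.error_handler = demo_error_handler,
};

static void demo_std_error_handler(struct demo_port *ap)
{
	const struct demo_port_ops *ops = ap->ops;	/* const-qualified local, as above */

	if (ops->error_handler)
		ops->error_handler();
}

int main(void)
{
	struct demo_port port = { .ops = &demo_ops };
	demo_std_error_handler(&port);
	return 0;
}
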
24292diff -urNp linux-2.6.32.42/drivers/ata/pata_acpi.c linux-2.6.32.42/drivers/ata/pata_acpi.c
24293--- linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24294+++ linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24295@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24296 ATA_BMDMA_SHT(DRV_NAME),
24297 };
24298
24299-static struct ata_port_operations pacpi_ops = {
24300+static const struct ata_port_operations pacpi_ops = {
24301 .inherits = &ata_bmdma_port_ops,
24302 .qc_issue = pacpi_qc_issue,
24303 .cable_detect = pacpi_cable_detect,
24304diff -urNp linux-2.6.32.42/drivers/ata/pata_ali.c linux-2.6.32.42/drivers/ata/pata_ali.c
24305--- linux-2.6.32.42/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24306+++ linux-2.6.32.42/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24307@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24308 * Port operations for PIO only ALi
24309 */
24310
24311-static struct ata_port_operations ali_early_port_ops = {
24312+static const struct ata_port_operations ali_early_port_ops = {
24313 .inherits = &ata_sff_port_ops,
24314 .cable_detect = ata_cable_40wire,
24315 .set_piomode = ali_set_piomode,
24316@@ -382,7 +382,7 @@ static const struct ata_port_operations
24317 * Port operations for DMA capable ALi without cable
24318 * detect
24319 */
24320-static struct ata_port_operations ali_20_port_ops = {
24321+static const struct ata_port_operations ali_20_port_ops = {
24322 .inherits = &ali_dma_base_ops,
24323 .cable_detect = ata_cable_40wire,
24324 .mode_filter = ali_20_filter,
24325@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24326 /*
24327 * Port operations for DMA capable ALi with cable detect
24328 */
24329-static struct ata_port_operations ali_c2_port_ops = {
24330+static const struct ata_port_operations ali_c2_port_ops = {
24331 .inherits = &ali_dma_base_ops,
24332 .check_atapi_dma = ali_check_atapi_dma,
24333 .cable_detect = ali_c2_cable_detect,
24334@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24335 /*
24336 * Port operations for DMA capable ALi with cable detect
24337 */
24338-static struct ata_port_operations ali_c4_port_ops = {
24339+static const struct ata_port_operations ali_c4_port_ops = {
24340 .inherits = &ali_dma_base_ops,
24341 .check_atapi_dma = ali_check_atapi_dma,
24342 .cable_detect = ali_c2_cable_detect,
24343@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24344 /*
24345 * Port operations for DMA capable ALi with cable detect and LBA48
24346 */
24347-static struct ata_port_operations ali_c5_port_ops = {
24348+static const struct ata_port_operations ali_c5_port_ops = {
24349 .inherits = &ali_dma_base_ops,
24350 .check_atapi_dma = ali_check_atapi_dma,
24351 .dev_config = ali_warn_atapi_dma,
24352diff -urNp linux-2.6.32.42/drivers/ata/pata_amd.c linux-2.6.32.42/drivers/ata/pata_amd.c
24353--- linux-2.6.32.42/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24354+++ linux-2.6.32.42/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24355@@ -397,28 +397,28 @@ static const struct ata_port_operations
24356 .prereset = amd_pre_reset,
24357 };
24358
24359-static struct ata_port_operations amd33_port_ops = {
24360+static const struct ata_port_operations amd33_port_ops = {
24361 .inherits = &amd_base_port_ops,
24362 .cable_detect = ata_cable_40wire,
24363 .set_piomode = amd33_set_piomode,
24364 .set_dmamode = amd33_set_dmamode,
24365 };
24366
24367-static struct ata_port_operations amd66_port_ops = {
24368+static const struct ata_port_operations amd66_port_ops = {
24369 .inherits = &amd_base_port_ops,
24370 .cable_detect = ata_cable_unknown,
24371 .set_piomode = amd66_set_piomode,
24372 .set_dmamode = amd66_set_dmamode,
24373 };
24374
24375-static struct ata_port_operations amd100_port_ops = {
24376+static const struct ata_port_operations amd100_port_ops = {
24377 .inherits = &amd_base_port_ops,
24378 .cable_detect = ata_cable_unknown,
24379 .set_piomode = amd100_set_piomode,
24380 .set_dmamode = amd100_set_dmamode,
24381 };
24382
24383-static struct ata_port_operations amd133_port_ops = {
24384+static const struct ata_port_operations amd133_port_ops = {
24385 .inherits = &amd_base_port_ops,
24386 .cable_detect = amd_cable_detect,
24387 .set_piomode = amd133_set_piomode,
24388@@ -433,13 +433,13 @@ static const struct ata_port_operations
24389 .host_stop = nv_host_stop,
24390 };
24391
24392-static struct ata_port_operations nv100_port_ops = {
24393+static const struct ata_port_operations nv100_port_ops = {
24394 .inherits = &nv_base_port_ops,
24395 .set_piomode = nv100_set_piomode,
24396 .set_dmamode = nv100_set_dmamode,
24397 };
24398
24399-static struct ata_port_operations nv133_port_ops = {
24400+static const struct ata_port_operations nv133_port_ops = {
24401 .inherits = &nv_base_port_ops,
24402 .set_piomode = nv133_set_piomode,
24403 .set_dmamode = nv133_set_dmamode,
24404diff -urNp linux-2.6.32.42/drivers/ata/pata_artop.c linux-2.6.32.42/drivers/ata/pata_artop.c
24405--- linux-2.6.32.42/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24406+++ linux-2.6.32.42/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24407@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24408 ATA_BMDMA_SHT(DRV_NAME),
24409 };
24410
24411-static struct ata_port_operations artop6210_ops = {
24412+static const struct ata_port_operations artop6210_ops = {
24413 .inherits = &ata_bmdma_port_ops,
24414 .cable_detect = ata_cable_40wire,
24415 .set_piomode = artop6210_set_piomode,
24416@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24417 .qc_defer = artop6210_qc_defer,
24418 };
24419
24420-static struct ata_port_operations artop6260_ops = {
24421+static const struct ata_port_operations artop6260_ops = {
24422 .inherits = &ata_bmdma_port_ops,
24423 .cable_detect = artop6260_cable_detect,
24424 .set_piomode = artop6260_set_piomode,
24425diff -urNp linux-2.6.32.42/drivers/ata/pata_at32.c linux-2.6.32.42/drivers/ata/pata_at32.c
24426--- linux-2.6.32.42/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24427+++ linux-2.6.32.42/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24428@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24429 ATA_PIO_SHT(DRV_NAME),
24430 };
24431
24432-static struct ata_port_operations at32_port_ops = {
24433+static const struct ata_port_operations at32_port_ops = {
24434 .inherits = &ata_sff_port_ops,
24435 .cable_detect = ata_cable_40wire,
24436 .set_piomode = pata_at32_set_piomode,
24437diff -urNp linux-2.6.32.42/drivers/ata/pata_at91.c linux-2.6.32.42/drivers/ata/pata_at91.c
24438--- linux-2.6.32.42/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24439+++ linux-2.6.32.42/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24440@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24441 ATA_PIO_SHT(DRV_NAME),
24442 };
24443
24444-static struct ata_port_operations pata_at91_port_ops = {
24445+static const struct ata_port_operations pata_at91_port_ops = {
24446 .inherits = &ata_sff_port_ops,
24447
24448 .sff_data_xfer = pata_at91_data_xfer_noirq,
24449diff -urNp linux-2.6.32.42/drivers/ata/pata_atiixp.c linux-2.6.32.42/drivers/ata/pata_atiixp.c
24450--- linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24451+++ linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24452@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24453 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24454 };
24455
24456-static struct ata_port_operations atiixp_port_ops = {
24457+static const struct ata_port_operations atiixp_port_ops = {
24458 .inherits = &ata_bmdma_port_ops,
24459
24460 .qc_prep = ata_sff_dumb_qc_prep,
24461diff -urNp linux-2.6.32.42/drivers/ata/pata_atp867x.c linux-2.6.32.42/drivers/ata/pata_atp867x.c
24462--- linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24463+++ linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24464@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24465 ATA_BMDMA_SHT(DRV_NAME),
24466 };
24467
24468-static struct ata_port_operations atp867x_ops = {
24469+static const struct ata_port_operations atp867x_ops = {
24470 .inherits = &ata_bmdma_port_ops,
24471 .cable_detect = atp867x_cable_detect,
24472 .set_piomode = atp867x_set_piomode,
24473diff -urNp linux-2.6.32.42/drivers/ata/pata_bf54x.c linux-2.6.32.42/drivers/ata/pata_bf54x.c
24474--- linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24475+++ linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24476@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24477 .dma_boundary = ATA_DMA_BOUNDARY,
24478 };
24479
24480-static struct ata_port_operations bfin_pata_ops = {
24481+static const struct ata_port_operations bfin_pata_ops = {
24482 .inherits = &ata_sff_port_ops,
24483
24484 .set_piomode = bfin_set_piomode,
24485diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd640.c linux-2.6.32.42/drivers/ata/pata_cmd640.c
24486--- linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24487+++ linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24488@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24489 ATA_BMDMA_SHT(DRV_NAME),
24490 };
24491
24492-static struct ata_port_operations cmd640_port_ops = {
24493+static const struct ata_port_operations cmd640_port_ops = {
24494 .inherits = &ata_bmdma_port_ops,
24495 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24496 .sff_data_xfer = ata_sff_data_xfer_noirq,
24497diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd64x.c linux-2.6.32.42/drivers/ata/pata_cmd64x.c
24498--- linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24499+++ linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24500@@ -271,18 +271,18 @@ static const struct ata_port_operations
24501 .set_dmamode = cmd64x_set_dmamode,
24502 };
24503
24504-static struct ata_port_operations cmd64x_port_ops = {
24505+static const struct ata_port_operations cmd64x_port_ops = {
24506 .inherits = &cmd64x_base_ops,
24507 .cable_detect = ata_cable_40wire,
24508 };
24509
24510-static struct ata_port_operations cmd646r1_port_ops = {
24511+static const struct ata_port_operations cmd646r1_port_ops = {
24512 .inherits = &cmd64x_base_ops,
24513 .bmdma_stop = cmd646r1_bmdma_stop,
24514 .cable_detect = ata_cable_40wire,
24515 };
24516
24517-static struct ata_port_operations cmd648_port_ops = {
24518+static const struct ata_port_operations cmd648_port_ops = {
24519 .inherits = &cmd64x_base_ops,
24520 .bmdma_stop = cmd648_bmdma_stop,
24521 .cable_detect = cmd648_cable_detect,
24522diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5520.c linux-2.6.32.42/drivers/ata/pata_cs5520.c
24523--- linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24524+++ linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24525@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24526 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24527 };
24528
24529-static struct ata_port_operations cs5520_port_ops = {
24530+static const struct ata_port_operations cs5520_port_ops = {
24531 .inherits = &ata_bmdma_port_ops,
24532 .qc_prep = ata_sff_dumb_qc_prep,
24533 .cable_detect = ata_cable_40wire,
24534diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5530.c linux-2.6.32.42/drivers/ata/pata_cs5530.c
24535--- linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24536+++ linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24537@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24538 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24539 };
24540
24541-static struct ata_port_operations cs5530_port_ops = {
24542+static const struct ata_port_operations cs5530_port_ops = {
24543 .inherits = &ata_bmdma_port_ops,
24544
24545 .qc_prep = ata_sff_dumb_qc_prep,
24546diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5535.c linux-2.6.32.42/drivers/ata/pata_cs5535.c
24547--- linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24548+++ linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24549@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24550 ATA_BMDMA_SHT(DRV_NAME),
24551 };
24552
24553-static struct ata_port_operations cs5535_port_ops = {
24554+static const struct ata_port_operations cs5535_port_ops = {
24555 .inherits = &ata_bmdma_port_ops,
24556 .cable_detect = cs5535_cable_detect,
24557 .set_piomode = cs5535_set_piomode,
24558diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5536.c linux-2.6.32.42/drivers/ata/pata_cs5536.c
24559--- linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24560+++ linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24561@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24562 ATA_BMDMA_SHT(DRV_NAME),
24563 };
24564
24565-static struct ata_port_operations cs5536_port_ops = {
24566+static const struct ata_port_operations cs5536_port_ops = {
24567 .inherits = &ata_bmdma_port_ops,
24568 .cable_detect = cs5536_cable_detect,
24569 .set_piomode = cs5536_set_piomode,
24570diff -urNp linux-2.6.32.42/drivers/ata/pata_cypress.c linux-2.6.32.42/drivers/ata/pata_cypress.c
24571--- linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24572+++ linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24573@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24574 ATA_BMDMA_SHT(DRV_NAME),
24575 };
24576
24577-static struct ata_port_operations cy82c693_port_ops = {
24578+static const struct ata_port_operations cy82c693_port_ops = {
24579 .inherits = &ata_bmdma_port_ops,
24580 .cable_detect = ata_cable_40wire,
24581 .set_piomode = cy82c693_set_piomode,
24582diff -urNp linux-2.6.32.42/drivers/ata/pata_efar.c linux-2.6.32.42/drivers/ata/pata_efar.c
24583--- linux-2.6.32.42/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24584+++ linux-2.6.32.42/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24585@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24586 ATA_BMDMA_SHT(DRV_NAME),
24587 };
24588
24589-static struct ata_port_operations efar_ops = {
24590+static const struct ata_port_operations efar_ops = {
24591 .inherits = &ata_bmdma_port_ops,
24592 .cable_detect = efar_cable_detect,
24593 .set_piomode = efar_set_piomode,
24594diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt366.c linux-2.6.32.42/drivers/ata/pata_hpt366.c
24595--- linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24596+++ linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24597@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24598 * Configuration for HPT366/68
24599 */
24600
24601-static struct ata_port_operations hpt366_port_ops = {
24602+static const struct ata_port_operations hpt366_port_ops = {
24603 .inherits = &ata_bmdma_port_ops,
24604 .cable_detect = hpt36x_cable_detect,
24605 .mode_filter = hpt366_filter,
24606diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt37x.c linux-2.6.32.42/drivers/ata/pata_hpt37x.c
24607--- linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24608+++ linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24609@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24610 * Configuration for HPT370
24611 */
24612
24613-static struct ata_port_operations hpt370_port_ops = {
24614+static const struct ata_port_operations hpt370_port_ops = {
24615 .inherits = &ata_bmdma_port_ops,
24616
24617 .bmdma_stop = hpt370_bmdma_stop,
24618@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24619 * Configuration for HPT370A. Close to 370 but less filters
24620 */
24621
24622-static struct ata_port_operations hpt370a_port_ops = {
24623+static const struct ata_port_operations hpt370a_port_ops = {
24624 .inherits = &hpt370_port_ops,
24625 .mode_filter = hpt370a_filter,
24626 };
24627@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24628 * and DMA mode setting functionality.
24629 */
24630
24631-static struct ata_port_operations hpt372_port_ops = {
24632+static const struct ata_port_operations hpt372_port_ops = {
24633 .inherits = &ata_bmdma_port_ops,
24634
24635 .bmdma_stop = hpt37x_bmdma_stop,
24636@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24637 * but we have a different cable detection procedure for function 1.
24638 */
24639
24640-static struct ata_port_operations hpt374_fn1_port_ops = {
24641+static const struct ata_port_operations hpt374_fn1_port_ops = {
24642 .inherits = &hpt372_port_ops,
24643 .prereset = hpt374_fn1_pre_reset,
24644 };
24645diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c
24646--- linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24647+++ linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24648@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24649 * Configuration for HPT3x2n.
24650 */
24651
24652-static struct ata_port_operations hpt3x2n_port_ops = {
24653+static const struct ata_port_operations hpt3x2n_port_ops = {
24654 .inherits = &ata_bmdma_port_ops,
24655
24656 .bmdma_stop = hpt3x2n_bmdma_stop,
24657diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x3.c linux-2.6.32.42/drivers/ata/pata_hpt3x3.c
24658--- linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24659+++ linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24660@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24661 ATA_BMDMA_SHT(DRV_NAME),
24662 };
24663
24664-static struct ata_port_operations hpt3x3_port_ops = {
24665+static const struct ata_port_operations hpt3x3_port_ops = {
24666 .inherits = &ata_bmdma_port_ops,
24667 .cable_detect = ata_cable_40wire,
24668 .set_piomode = hpt3x3_set_piomode,
24669diff -urNp linux-2.6.32.42/drivers/ata/pata_icside.c linux-2.6.32.42/drivers/ata/pata_icside.c
24670--- linux-2.6.32.42/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24671+++ linux-2.6.32.42/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24672@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24673 }
24674 }
24675
24676-static struct ata_port_operations pata_icside_port_ops = {
24677+static const struct ata_port_operations pata_icside_port_ops = {
24678 .inherits = &ata_sff_port_ops,
24679 /* no need to build any PRD tables for DMA */
24680 .qc_prep = ata_noop_qc_prep,
24681diff -urNp linux-2.6.32.42/drivers/ata/pata_isapnp.c linux-2.6.32.42/drivers/ata/pata_isapnp.c
24682--- linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24683+++ linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24684@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24685 ATA_PIO_SHT(DRV_NAME),
24686 };
24687
24688-static struct ata_port_operations isapnp_port_ops = {
24689+static const struct ata_port_operations isapnp_port_ops = {
24690 .inherits = &ata_sff_port_ops,
24691 .cable_detect = ata_cable_40wire,
24692 };
24693
24694-static struct ata_port_operations isapnp_noalt_port_ops = {
24695+static const struct ata_port_operations isapnp_noalt_port_ops = {
24696 .inherits = &ata_sff_port_ops,
24697 .cable_detect = ata_cable_40wire,
24698 /* No altstatus so we don't want to use the lost interrupt poll */
24699diff -urNp linux-2.6.32.42/drivers/ata/pata_it8213.c linux-2.6.32.42/drivers/ata/pata_it8213.c
24700--- linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24701+++ linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24702@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24703 };
24704
24705
24706-static struct ata_port_operations it8213_ops = {
24707+static const struct ata_port_operations it8213_ops = {
24708 .inherits = &ata_bmdma_port_ops,
24709 .cable_detect = it8213_cable_detect,
24710 .set_piomode = it8213_set_piomode,
24711diff -urNp linux-2.6.32.42/drivers/ata/pata_it821x.c linux-2.6.32.42/drivers/ata/pata_it821x.c
24712--- linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24713+++ linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24714@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24715 ATA_BMDMA_SHT(DRV_NAME),
24716 };
24717
24718-static struct ata_port_operations it821x_smart_port_ops = {
24719+static const struct ata_port_operations it821x_smart_port_ops = {
24720 .inherits = &ata_bmdma_port_ops,
24721
24722 .check_atapi_dma= it821x_check_atapi_dma,
24723@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24724 .port_start = it821x_port_start,
24725 };
24726
24727-static struct ata_port_operations it821x_passthru_port_ops = {
24728+static const struct ata_port_operations it821x_passthru_port_ops = {
24729 .inherits = &ata_bmdma_port_ops,
24730
24731 .check_atapi_dma= it821x_check_atapi_dma,
24732@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24733 .port_start = it821x_port_start,
24734 };
24735
24736-static struct ata_port_operations it821x_rdc_port_ops = {
24737+static const struct ata_port_operations it821x_rdc_port_ops = {
24738 .inherits = &ata_bmdma_port_ops,
24739
24740 .check_atapi_dma= it821x_check_atapi_dma,
24741diff -urNp linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c
24742--- linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24743+++ linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24744@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24745 ATA_PIO_SHT(DRV_NAME),
24746 };
24747
24748-static struct ata_port_operations ixp4xx_port_ops = {
24749+static const struct ata_port_operations ixp4xx_port_ops = {
24750 .inherits = &ata_sff_port_ops,
24751 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24752 .cable_detect = ata_cable_40wire,
24753diff -urNp linux-2.6.32.42/drivers/ata/pata_jmicron.c linux-2.6.32.42/drivers/ata/pata_jmicron.c
24754--- linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24755+++ linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24756@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24757 ATA_BMDMA_SHT(DRV_NAME),
24758 };
24759
24760-static struct ata_port_operations jmicron_ops = {
24761+static const struct ata_port_operations jmicron_ops = {
24762 .inherits = &ata_bmdma_port_ops,
24763 .prereset = jmicron_pre_reset,
24764 };
24765diff -urNp linux-2.6.32.42/drivers/ata/pata_legacy.c linux-2.6.32.42/drivers/ata/pata_legacy.c
24766--- linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24767+++ linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24768@@ -106,7 +106,7 @@ struct legacy_probe {
24769
24770 struct legacy_controller {
24771 const char *name;
24772- struct ata_port_operations *ops;
24773+ const struct ata_port_operations *ops;
24774 unsigned int pio_mask;
24775 unsigned int flags;
24776 unsigned int pflags;
24777@@ -223,12 +223,12 @@ static const struct ata_port_operations
24778 * pio_mask as well.
24779 */
24780
24781-static struct ata_port_operations simple_port_ops = {
24782+static const struct ata_port_operations simple_port_ops = {
24783 .inherits = &legacy_base_port_ops,
24784 .sff_data_xfer = ata_sff_data_xfer_noirq,
24785 };
24786
24787-static struct ata_port_operations legacy_port_ops = {
24788+static const struct ata_port_operations legacy_port_ops = {
24789 .inherits = &legacy_base_port_ops,
24790 .sff_data_xfer = ata_sff_data_xfer_noirq,
24791 .set_mode = legacy_set_mode,
24792@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24793 return buflen;
24794 }
24795
24796-static struct ata_port_operations pdc20230_port_ops = {
24797+static const struct ata_port_operations pdc20230_port_ops = {
24798 .inherits = &legacy_base_port_ops,
24799 .set_piomode = pdc20230_set_piomode,
24800 .sff_data_xfer = pdc_data_xfer_vlb,
24801@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24802 ioread8(ap->ioaddr.status_addr);
24803 }
24804
24805-static struct ata_port_operations ht6560a_port_ops = {
24806+static const struct ata_port_operations ht6560a_port_ops = {
24807 .inherits = &legacy_base_port_ops,
24808 .set_piomode = ht6560a_set_piomode,
24809 };
24810@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24811 ioread8(ap->ioaddr.status_addr);
24812 }
24813
24814-static struct ata_port_operations ht6560b_port_ops = {
24815+static const struct ata_port_operations ht6560b_port_ops = {
24816 .inherits = &legacy_base_port_ops,
24817 .set_piomode = ht6560b_set_piomode,
24818 };
24819@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24820 }
24821
24822
24823-static struct ata_port_operations opti82c611a_port_ops = {
24824+static const struct ata_port_operations opti82c611a_port_ops = {
24825 .inherits = &legacy_base_port_ops,
24826 .set_piomode = opti82c611a_set_piomode,
24827 };
24828@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24829 return ata_sff_qc_issue(qc);
24830 }
24831
24832-static struct ata_port_operations opti82c46x_port_ops = {
24833+static const struct ata_port_operations opti82c46x_port_ops = {
24834 .inherits = &legacy_base_port_ops,
24835 .set_piomode = opti82c46x_set_piomode,
24836 .qc_issue = opti82c46x_qc_issue,
24837@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24838 return 0;
24839 }
24840
24841-static struct ata_port_operations qdi6500_port_ops = {
24842+static const struct ata_port_operations qdi6500_port_ops = {
24843 .inherits = &legacy_base_port_ops,
24844 .set_piomode = qdi6500_set_piomode,
24845 .qc_issue = qdi_qc_issue,
24846 .sff_data_xfer = vlb32_data_xfer,
24847 };
24848
24849-static struct ata_port_operations qdi6580_port_ops = {
24850+static const struct ata_port_operations qdi6580_port_ops = {
24851 .inherits = &legacy_base_port_ops,
24852 .set_piomode = qdi6580_set_piomode,
24853 .sff_data_xfer = vlb32_data_xfer,
24854 };
24855
24856-static struct ata_port_operations qdi6580dp_port_ops = {
24857+static const struct ata_port_operations qdi6580dp_port_ops = {
24858 .inherits = &legacy_base_port_ops,
24859 .set_piomode = qdi6580dp_set_piomode,
24860 .sff_data_xfer = vlb32_data_xfer,
24861@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24862 return 0;
24863 }
24864
24865-static struct ata_port_operations winbond_port_ops = {
24866+static const struct ata_port_operations winbond_port_ops = {
24867 .inherits = &legacy_base_port_ops,
24868 .set_piomode = winbond_set_piomode,
24869 .sff_data_xfer = vlb32_data_xfer,
24870@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24871 int pio_modes = controller->pio_mask;
24872 unsigned long io = probe->port;
24873 u32 mask = (1 << probe->slot);
24874- struct ata_port_operations *ops = controller->ops;
24875+ const struct ata_port_operations *ops = controller->ops;
24876 struct legacy_data *ld = &legacy_data[probe->slot];
24877 struct ata_host *host = NULL;
24878 struct ata_port *ap;
24879diff -urNp linux-2.6.32.42/drivers/ata/pata_marvell.c linux-2.6.32.42/drivers/ata/pata_marvell.c
24880--- linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24881+++ linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24882@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24883 ATA_BMDMA_SHT(DRV_NAME),
24884 };
24885
24886-static struct ata_port_operations marvell_ops = {
24887+static const struct ata_port_operations marvell_ops = {
24888 .inherits = &ata_bmdma_port_ops,
24889 .cable_detect = marvell_cable_detect,
24890 .prereset = marvell_pre_reset,
24891diff -urNp linux-2.6.32.42/drivers/ata/pata_mpc52xx.c linux-2.6.32.42/drivers/ata/pata_mpc52xx.c
24892--- linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24893+++ linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24894@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24895 ATA_PIO_SHT(DRV_NAME),
24896 };
24897
24898-static struct ata_port_operations mpc52xx_ata_port_ops = {
24899+static const struct ata_port_operations mpc52xx_ata_port_ops = {
24900 .inherits = &ata_bmdma_port_ops,
24901 .sff_dev_select = mpc52xx_ata_dev_select,
24902 .set_piomode = mpc52xx_ata_set_piomode,
24903diff -urNp linux-2.6.32.42/drivers/ata/pata_mpiix.c linux-2.6.32.42/drivers/ata/pata_mpiix.c
24904--- linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24905+++ linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24906@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24907 ATA_PIO_SHT(DRV_NAME),
24908 };
24909
24910-static struct ata_port_operations mpiix_port_ops = {
24911+static const struct ata_port_operations mpiix_port_ops = {
24912 .inherits = &ata_sff_port_ops,
24913 .qc_issue = mpiix_qc_issue,
24914 .cable_detect = ata_cable_40wire,
24915diff -urNp linux-2.6.32.42/drivers/ata/pata_netcell.c linux-2.6.32.42/drivers/ata/pata_netcell.c
24916--- linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24917+++ linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24918@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24919 ATA_BMDMA_SHT(DRV_NAME),
24920 };
24921
24922-static struct ata_port_operations netcell_ops = {
24923+static const struct ata_port_operations netcell_ops = {
24924 .inherits = &ata_bmdma_port_ops,
24925 .cable_detect = ata_cable_80wire,
24926 .read_id = netcell_read_id,
24927diff -urNp linux-2.6.32.42/drivers/ata/pata_ninja32.c linux-2.6.32.42/drivers/ata/pata_ninja32.c
24928--- linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24929+++ linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24930@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24931 ATA_BMDMA_SHT(DRV_NAME),
24932 };
24933
24934-static struct ata_port_operations ninja32_port_ops = {
24935+static const struct ata_port_operations ninja32_port_ops = {
24936 .inherits = &ata_bmdma_port_ops,
24937 .sff_dev_select = ninja32_dev_select,
24938 .cable_detect = ata_cable_40wire,
24939diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87410.c linux-2.6.32.42/drivers/ata/pata_ns87410.c
24940--- linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24941+++ linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24942@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24943 ATA_PIO_SHT(DRV_NAME),
24944 };
24945
24946-static struct ata_port_operations ns87410_port_ops = {
24947+static const struct ata_port_operations ns87410_port_ops = {
24948 .inherits = &ata_sff_port_ops,
24949 .qc_issue = ns87410_qc_issue,
24950 .cable_detect = ata_cable_40wire,
24951diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87415.c linux-2.6.32.42/drivers/ata/pata_ns87415.c
24952--- linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24953+++ linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24954@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24955 }
24956 #endif /* 87560 SuperIO Support */
24957
24958-static struct ata_port_operations ns87415_pata_ops = {
24959+static const struct ata_port_operations ns87415_pata_ops = {
24960 .inherits = &ata_bmdma_port_ops,
24961
24962 .check_atapi_dma = ns87415_check_atapi_dma,
24963@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24964 };
24965
24966 #if defined(CONFIG_SUPERIO)
24967-static struct ata_port_operations ns87560_pata_ops = {
24968+static const struct ata_port_operations ns87560_pata_ops = {
24969 .inherits = &ns87415_pata_ops,
24970 .sff_tf_read = ns87560_tf_read,
24971 .sff_check_status = ns87560_check_status,
24972diff -urNp linux-2.6.32.42/drivers/ata/pata_octeon_cf.c linux-2.6.32.42/drivers/ata/pata_octeon_cf.c
24973--- linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24974+++ linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24975@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24976 return 0;
24977 }
24978
24979+/* cannot be const */
24980 static struct ata_port_operations octeon_cf_ops = {
24981 .inherits = &ata_sff_port_ops,
24982 .check_atapi_dma = octeon_cf_check_atapi_dma,
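
Aside (not part of the patch): pata_octeon_cf.c is the one driver in this series whose ops table is deliberately left writable — the hunk above only adds a "cannot be const" note, presumably because the driver fills in some of these operations at probe time depending on the hardware. The sketch below, illustration only and with invented names, shows the distinction the patch draws: tables fully known at compile time can be const and live in read-only data, while tables written at run time must stay non-const.

struct demo_ops {
	int (*issue)(void);
};

static int issue_pio(void) { return 0; }
static int issue_dma(void) { return 1; }

/* fully determined at compile time -> can be const / read-only */
static const struct demo_ops fixed_ops = {
	.issue = issue_pio,
};

/* selected at probe time -> must remain writable */
static struct demo_ops runtime_ops = {
	.issue = issue_pio,
};

static void probe(int have_dma)
{
	if (have_dma)
		runtime_ops.issue = issue_dma;	/* legal: the object is not const */
	/* fixed_ops.issue = issue_dma;	   would not compile */
}

int main(void)
{
	probe(1);
	return (runtime_ops.issue() == 1 && fixed_ops.issue() == 0) ? 0 : 1;
}
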
24983diff -urNp linux-2.6.32.42/drivers/ata/pata_oldpiix.c linux-2.6.32.42/drivers/ata/pata_oldpiix.c
24984--- linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24985+++ linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24986@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24987 ATA_BMDMA_SHT(DRV_NAME),
24988 };
24989
24990-static struct ata_port_operations oldpiix_pata_ops = {
24991+static const struct ata_port_operations oldpiix_pata_ops = {
24992 .inherits = &ata_bmdma_port_ops,
24993 .qc_issue = oldpiix_qc_issue,
24994 .cable_detect = ata_cable_40wire,
24995diff -urNp linux-2.6.32.42/drivers/ata/pata_opti.c linux-2.6.32.42/drivers/ata/pata_opti.c
24996--- linux-2.6.32.42/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24997+++ linux-2.6.32.42/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24998@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24999 ATA_PIO_SHT(DRV_NAME),
25000 };
25001
25002-static struct ata_port_operations opti_port_ops = {
25003+static const struct ata_port_operations opti_port_ops = {
25004 .inherits = &ata_sff_port_ops,
25005 .cable_detect = ata_cable_40wire,
25006 .set_piomode = opti_set_piomode,
25007diff -urNp linux-2.6.32.42/drivers/ata/pata_optidma.c linux-2.6.32.42/drivers/ata/pata_optidma.c
25008--- linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25009+++ linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25010@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25011 ATA_BMDMA_SHT(DRV_NAME),
25012 };
25013
25014-static struct ata_port_operations optidma_port_ops = {
25015+static const struct ata_port_operations optidma_port_ops = {
25016 .inherits = &ata_bmdma_port_ops,
25017 .cable_detect = ata_cable_40wire,
25018 .set_piomode = optidma_set_pio_mode,
25019@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25020 .prereset = optidma_pre_reset,
25021 };
25022
25023-static struct ata_port_operations optiplus_port_ops = {
25024+static const struct ata_port_operations optiplus_port_ops = {
25025 .inherits = &optidma_port_ops,
25026 .set_piomode = optiplus_set_pio_mode,
25027 .set_dmamode = optiplus_set_dma_mode,
25028diff -urNp linux-2.6.32.42/drivers/ata/pata_palmld.c linux-2.6.32.42/drivers/ata/pata_palmld.c
25029--- linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25030+++ linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25031@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25032 ATA_PIO_SHT(DRV_NAME),
25033 };
25034
25035-static struct ata_port_operations palmld_port_ops = {
25036+static const struct ata_port_operations palmld_port_ops = {
25037 .inherits = &ata_sff_port_ops,
25038 .sff_data_xfer = ata_sff_data_xfer_noirq,
25039 .cable_detect = ata_cable_40wire,
25040diff -urNp linux-2.6.32.42/drivers/ata/pata_pcmcia.c linux-2.6.32.42/drivers/ata/pata_pcmcia.c
25041--- linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25042+++ linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25043@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25044 ATA_PIO_SHT(DRV_NAME),
25045 };
25046
25047-static struct ata_port_operations pcmcia_port_ops = {
25048+static const struct ata_port_operations pcmcia_port_ops = {
25049 .inherits = &ata_sff_port_ops,
25050 .sff_data_xfer = ata_sff_data_xfer_noirq,
25051 .cable_detect = ata_cable_40wire,
25052 .set_mode = pcmcia_set_mode,
25053 };
25054
25055-static struct ata_port_operations pcmcia_8bit_port_ops = {
25056+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25057 .inherits = &ata_sff_port_ops,
25058 .sff_data_xfer = ata_data_xfer_8bit,
25059 .cable_detect = ata_cable_40wire,
25060@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25061 unsigned long io_base, ctl_base;
25062 void __iomem *io_addr, *ctl_addr;
25063 int n_ports = 1;
25064- struct ata_port_operations *ops = &pcmcia_port_ops;
25065+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25066
25067 info = kzalloc(sizeof(*info), GFP_KERNEL);
25068 if (info == NULL)
25069diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc2027x.c linux-2.6.32.42/drivers/ata/pata_pdc2027x.c
25070--- linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25071+++ linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25072@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25073 ATA_BMDMA_SHT(DRV_NAME),
25074 };
25075
25076-static struct ata_port_operations pdc2027x_pata100_ops = {
25077+static const struct ata_port_operations pdc2027x_pata100_ops = {
25078 .inherits = &ata_bmdma_port_ops,
25079 .check_atapi_dma = pdc2027x_check_atapi_dma,
25080 .cable_detect = pdc2027x_cable_detect,
25081 .prereset = pdc2027x_prereset,
25082 };
25083
25084-static struct ata_port_operations pdc2027x_pata133_ops = {
25085+static const struct ata_port_operations pdc2027x_pata133_ops = {
25086 .inherits = &pdc2027x_pata100_ops,
25087 .mode_filter = pdc2027x_mode_filter,
25088 .set_piomode = pdc2027x_set_piomode,
25089diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c
25090--- linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25091+++ linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25092@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25093 ATA_BMDMA_SHT(DRV_NAME),
25094 };
25095
25096-static struct ata_port_operations pdc2024x_port_ops = {
25097+static const struct ata_port_operations pdc2024x_port_ops = {
25098 .inherits = &ata_bmdma_port_ops,
25099
25100 .cable_detect = ata_cable_40wire,
25101@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25102 .sff_exec_command = pdc202xx_exec_command,
25103 };
25104
25105-static struct ata_port_operations pdc2026x_port_ops = {
25106+static const struct ata_port_operations pdc2026x_port_ops = {
25107 .inherits = &pdc2024x_port_ops,
25108
25109 .check_atapi_dma = pdc2026x_check_atapi_dma,
25110diff -urNp linux-2.6.32.42/drivers/ata/pata_platform.c linux-2.6.32.42/drivers/ata/pata_platform.c
25111--- linux-2.6.32.42/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25112+++ linux-2.6.32.42/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25113@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25114 ATA_PIO_SHT(DRV_NAME),
25115 };
25116
25117-static struct ata_port_operations pata_platform_port_ops = {
25118+static const struct ata_port_operations pata_platform_port_ops = {
25119 .inherits = &ata_sff_port_ops,
25120 .sff_data_xfer = ata_sff_data_xfer_noirq,
25121 .cable_detect = ata_cable_unknown,
25122diff -urNp linux-2.6.32.42/drivers/ata/pata_qdi.c linux-2.6.32.42/drivers/ata/pata_qdi.c
25123--- linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25124+++ linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25125@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25126 ATA_PIO_SHT(DRV_NAME),
25127 };
25128
25129-static struct ata_port_operations qdi6500_port_ops = {
25130+static const struct ata_port_operations qdi6500_port_ops = {
25131 .inherits = &ata_sff_port_ops,
25132 .qc_issue = qdi_qc_issue,
25133 .sff_data_xfer = qdi_data_xfer,
25134@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25135 .set_piomode = qdi6500_set_piomode,
25136 };
25137
25138-static struct ata_port_operations qdi6580_port_ops = {
25139+static const struct ata_port_operations qdi6580_port_ops = {
25140 .inherits = &qdi6500_port_ops,
25141 .set_piomode = qdi6580_set_piomode,
25142 };
25143diff -urNp linux-2.6.32.42/drivers/ata/pata_radisys.c linux-2.6.32.42/drivers/ata/pata_radisys.c
25144--- linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25145+++ linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25146@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25147 ATA_BMDMA_SHT(DRV_NAME),
25148 };
25149
25150-static struct ata_port_operations radisys_pata_ops = {
25151+static const struct ata_port_operations radisys_pata_ops = {
25152 .inherits = &ata_bmdma_port_ops,
25153 .qc_issue = radisys_qc_issue,
25154 .cable_detect = ata_cable_unknown,
25155diff -urNp linux-2.6.32.42/drivers/ata/pata_rb532_cf.c linux-2.6.32.42/drivers/ata/pata_rb532_cf.c
25156--- linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25157+++ linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25158@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25159 return IRQ_HANDLED;
25160 }
25161
25162-static struct ata_port_operations rb532_pata_port_ops = {
25163+static const struct ata_port_operations rb532_pata_port_ops = {
25164 .inherits = &ata_sff_port_ops,
25165 .sff_data_xfer = ata_sff_data_xfer32,
25166 };
25167diff -urNp linux-2.6.32.42/drivers/ata/pata_rdc.c linux-2.6.32.42/drivers/ata/pata_rdc.c
25168--- linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25169+++ linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25170@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25171 pci_write_config_byte(dev, 0x48, udma_enable);
25172 }
25173
25174-static struct ata_port_operations rdc_pata_ops = {
25175+static const struct ata_port_operations rdc_pata_ops = {
25176 .inherits = &ata_bmdma32_port_ops,
25177 .cable_detect = rdc_pata_cable_detect,
25178 .set_piomode = rdc_set_piomode,
25179diff -urNp linux-2.6.32.42/drivers/ata/pata_rz1000.c linux-2.6.32.42/drivers/ata/pata_rz1000.c
25180--- linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25181+++ linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25182@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25183 ATA_PIO_SHT(DRV_NAME),
25184 };
25185
25186-static struct ata_port_operations rz1000_port_ops = {
25187+static const struct ata_port_operations rz1000_port_ops = {
25188 .inherits = &ata_sff_port_ops,
25189 .cable_detect = ata_cable_40wire,
25190 .set_mode = rz1000_set_mode,
25191diff -urNp linux-2.6.32.42/drivers/ata/pata_sc1200.c linux-2.6.32.42/drivers/ata/pata_sc1200.c
25192--- linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25193+++ linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25194@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25195 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25196 };
25197
25198-static struct ata_port_operations sc1200_port_ops = {
25199+static const struct ata_port_operations sc1200_port_ops = {
25200 .inherits = &ata_bmdma_port_ops,
25201 .qc_prep = ata_sff_dumb_qc_prep,
25202 .qc_issue = sc1200_qc_issue,
25203diff -urNp linux-2.6.32.42/drivers/ata/pata_scc.c linux-2.6.32.42/drivers/ata/pata_scc.c
25204--- linux-2.6.32.42/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25205+++ linux-2.6.32.42/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25206@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25207 ATA_BMDMA_SHT(DRV_NAME),
25208 };
25209
25210-static struct ata_port_operations scc_pata_ops = {
25211+static const struct ata_port_operations scc_pata_ops = {
25212 .inherits = &ata_bmdma_port_ops,
25213
25214 .set_piomode = scc_set_piomode,
25215diff -urNp linux-2.6.32.42/drivers/ata/pata_sch.c linux-2.6.32.42/drivers/ata/pata_sch.c
25216--- linux-2.6.32.42/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25217+++ linux-2.6.32.42/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25218@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25219 ATA_BMDMA_SHT(DRV_NAME),
25220 };
25221
25222-static struct ata_port_operations sch_pata_ops = {
25223+static const struct ata_port_operations sch_pata_ops = {
25224 .inherits = &ata_bmdma_port_ops,
25225 .cable_detect = ata_cable_unknown,
25226 .set_piomode = sch_set_piomode,
25227diff -urNp linux-2.6.32.42/drivers/ata/pata_serverworks.c linux-2.6.32.42/drivers/ata/pata_serverworks.c
25228--- linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25229+++ linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25230@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25231 ATA_BMDMA_SHT(DRV_NAME),
25232 };
25233
25234-static struct ata_port_operations serverworks_osb4_port_ops = {
25235+static const struct ata_port_operations serverworks_osb4_port_ops = {
25236 .inherits = &ata_bmdma_port_ops,
25237 .cable_detect = serverworks_cable_detect,
25238 .mode_filter = serverworks_osb4_filter,
25239@@ -307,7 +307,7 @@ static struct ata_port_operations server
25240 .set_dmamode = serverworks_set_dmamode,
25241 };
25242
25243-static struct ata_port_operations serverworks_csb_port_ops = {
25244+static const struct ata_port_operations serverworks_csb_port_ops = {
25245 .inherits = &serverworks_osb4_port_ops,
25246 .mode_filter = serverworks_csb_filter,
25247 };
25248diff -urNp linux-2.6.32.42/drivers/ata/pata_sil680.c linux-2.6.32.42/drivers/ata/pata_sil680.c
25249--- linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25250+++ linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25251@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25252 ATA_BMDMA_SHT(DRV_NAME),
25253 };
25254
25255-static struct ata_port_operations sil680_port_ops = {
25256+static const struct ata_port_operations sil680_port_ops = {
25257 .inherits = &ata_bmdma32_port_ops,
25258 .cable_detect = sil680_cable_detect,
25259 .set_piomode = sil680_set_piomode,
25260diff -urNp linux-2.6.32.42/drivers/ata/pata_sis.c linux-2.6.32.42/drivers/ata/pata_sis.c
25261--- linux-2.6.32.42/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25262+++ linux-2.6.32.42/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25263@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25264 ATA_BMDMA_SHT(DRV_NAME),
25265 };
25266
25267-static struct ata_port_operations sis_133_for_sata_ops = {
25268+static const struct ata_port_operations sis_133_for_sata_ops = {
25269 .inherits = &ata_bmdma_port_ops,
25270 .set_piomode = sis_133_set_piomode,
25271 .set_dmamode = sis_133_set_dmamode,
25272 .cable_detect = sis_133_cable_detect,
25273 };
25274
25275-static struct ata_port_operations sis_base_ops = {
25276+static const struct ata_port_operations sis_base_ops = {
25277 .inherits = &ata_bmdma_port_ops,
25278 .prereset = sis_pre_reset,
25279 };
25280
25281-static struct ata_port_operations sis_133_ops = {
25282+static const struct ata_port_operations sis_133_ops = {
25283 .inherits = &sis_base_ops,
25284 .set_piomode = sis_133_set_piomode,
25285 .set_dmamode = sis_133_set_dmamode,
25286 .cable_detect = sis_133_cable_detect,
25287 };
25288
25289-static struct ata_port_operations sis_133_early_ops = {
25290+static const struct ata_port_operations sis_133_early_ops = {
25291 .inherits = &sis_base_ops,
25292 .set_piomode = sis_100_set_piomode,
25293 .set_dmamode = sis_133_early_set_dmamode,
25294 .cable_detect = sis_66_cable_detect,
25295 };
25296
25297-static struct ata_port_operations sis_100_ops = {
25298+static const struct ata_port_operations sis_100_ops = {
25299 .inherits = &sis_base_ops,
25300 .set_piomode = sis_100_set_piomode,
25301 .set_dmamode = sis_100_set_dmamode,
25302 .cable_detect = sis_66_cable_detect,
25303 };
25304
25305-static struct ata_port_operations sis_66_ops = {
25306+static const struct ata_port_operations sis_66_ops = {
25307 .inherits = &sis_base_ops,
25308 .set_piomode = sis_old_set_piomode,
25309 .set_dmamode = sis_66_set_dmamode,
25310 .cable_detect = sis_66_cable_detect,
25311 };
25312
25313-static struct ata_port_operations sis_old_ops = {
25314+static const struct ata_port_operations sis_old_ops = {
25315 .inherits = &sis_base_ops,
25316 .set_piomode = sis_old_set_piomode,
25317 .set_dmamode = sis_old_set_dmamode,
25318diff -urNp linux-2.6.32.42/drivers/ata/pata_sl82c105.c linux-2.6.32.42/drivers/ata/pata_sl82c105.c
25319--- linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25320+++ linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25321@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25322 ATA_BMDMA_SHT(DRV_NAME),
25323 };
25324
25325-static struct ata_port_operations sl82c105_port_ops = {
25326+static const struct ata_port_operations sl82c105_port_ops = {
25327 .inherits = &ata_bmdma_port_ops,
25328 .qc_defer = sl82c105_qc_defer,
25329 .bmdma_start = sl82c105_bmdma_start,
25330diff -urNp linux-2.6.32.42/drivers/ata/pata_triflex.c linux-2.6.32.42/drivers/ata/pata_triflex.c
25331--- linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25332+++ linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25333@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25334 ATA_BMDMA_SHT(DRV_NAME),
25335 };
25336
25337-static struct ata_port_operations triflex_port_ops = {
25338+static const struct ata_port_operations triflex_port_ops = {
25339 .inherits = &ata_bmdma_port_ops,
25340 .bmdma_start = triflex_bmdma_start,
25341 .bmdma_stop = triflex_bmdma_stop,
25342diff -urNp linux-2.6.32.42/drivers/ata/pata_via.c linux-2.6.32.42/drivers/ata/pata_via.c
25343--- linux-2.6.32.42/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25344+++ linux-2.6.32.42/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25345@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25346 ATA_BMDMA_SHT(DRV_NAME),
25347 };
25348
25349-static struct ata_port_operations via_port_ops = {
25350+static const struct ata_port_operations via_port_ops = {
25351 .inherits = &ata_bmdma_port_ops,
25352 .cable_detect = via_cable_detect,
25353 .set_piomode = via_set_piomode,
25354@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25355 .port_start = via_port_start,
25356 };
25357
25358-static struct ata_port_operations via_port_ops_noirq = {
25359+static const struct ata_port_operations via_port_ops_noirq = {
25360 .inherits = &via_port_ops,
25361 .sff_data_xfer = ata_sff_data_xfer_noirq,
25362 };
25363diff -urNp linux-2.6.32.42/drivers/ata/pata_winbond.c linux-2.6.32.42/drivers/ata/pata_winbond.c
25364--- linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25365+++ linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25366@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25367 ATA_PIO_SHT(DRV_NAME),
25368 };
25369
25370-static struct ata_port_operations winbond_port_ops = {
25371+static const struct ata_port_operations winbond_port_ops = {
25372 .inherits = &ata_sff_port_ops,
25373 .sff_data_xfer = winbond_data_xfer,
25374 .cable_detect = ata_cable_40wire,
25375diff -urNp linux-2.6.32.42/drivers/ata/pdc_adma.c linux-2.6.32.42/drivers/ata/pdc_adma.c
25376--- linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25377+++ linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25378@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25379 .dma_boundary = ADMA_DMA_BOUNDARY,
25380 };
25381
25382-static struct ata_port_operations adma_ata_ops = {
25383+static const struct ata_port_operations adma_ata_ops = {
25384 .inherits = &ata_sff_port_ops,
25385
25386 .lost_interrupt = ATA_OP_NULL,
25387diff -urNp linux-2.6.32.42/drivers/ata/sata_fsl.c linux-2.6.32.42/drivers/ata/sata_fsl.c
25388--- linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25389+++ linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25390@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25391 .dma_boundary = ATA_DMA_BOUNDARY,
25392 };
25393
25394-static struct ata_port_operations sata_fsl_ops = {
25395+static const struct ata_port_operations sata_fsl_ops = {
25396 .inherits = &sata_pmp_port_ops,
25397
25398 .qc_defer = ata_std_qc_defer,
25399diff -urNp linux-2.6.32.42/drivers/ata/sata_inic162x.c linux-2.6.32.42/drivers/ata/sata_inic162x.c
25400--- linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25401+++ linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25402@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25403 return 0;
25404 }
25405
25406-static struct ata_port_operations inic_port_ops = {
25407+static const struct ata_port_operations inic_port_ops = {
25408 .inherits = &sata_port_ops,
25409
25410 .check_atapi_dma = inic_check_atapi_dma,
25411diff -urNp linux-2.6.32.42/drivers/ata/sata_mv.c linux-2.6.32.42/drivers/ata/sata_mv.c
25412--- linux-2.6.32.42/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25413+++ linux-2.6.32.42/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25414@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25415 .dma_boundary = MV_DMA_BOUNDARY,
25416 };
25417
25418-static struct ata_port_operations mv5_ops = {
25419+static const struct ata_port_operations mv5_ops = {
25420 .inherits = &ata_sff_port_ops,
25421
25422 .lost_interrupt = ATA_OP_NULL,
25423@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25424 .port_stop = mv_port_stop,
25425 };
25426
25427-static struct ata_port_operations mv6_ops = {
25428+static const struct ata_port_operations mv6_ops = {
25429 .inherits = &mv5_ops,
25430 .dev_config = mv6_dev_config,
25431 .scr_read = mv_scr_read,
25432@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25433 .bmdma_status = mv_bmdma_status,
25434 };
25435
25436-static struct ata_port_operations mv_iie_ops = {
25437+static const struct ata_port_operations mv_iie_ops = {
25438 .inherits = &mv6_ops,
25439 .dev_config = ATA_OP_NULL,
25440 .qc_prep = mv_qc_prep_iie,
25441diff -urNp linux-2.6.32.42/drivers/ata/sata_nv.c linux-2.6.32.42/drivers/ata/sata_nv.c
25442--- linux-2.6.32.42/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25443+++ linux-2.6.32.42/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25444@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25445 * cases. Define nv_hardreset() which only kicks in for post-boot
25446 * probing and use it for all variants.
25447 */
25448-static struct ata_port_operations nv_generic_ops = {
25449+static const struct ata_port_operations nv_generic_ops = {
25450 .inherits = &ata_bmdma_port_ops,
25451 .lost_interrupt = ATA_OP_NULL,
25452 .scr_read = nv_scr_read,
25453@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25454 .hardreset = nv_hardreset,
25455 };
25456
25457-static struct ata_port_operations nv_nf2_ops = {
25458+static const struct ata_port_operations nv_nf2_ops = {
25459 .inherits = &nv_generic_ops,
25460 .freeze = nv_nf2_freeze,
25461 .thaw = nv_nf2_thaw,
25462 };
25463
25464-static struct ata_port_operations nv_ck804_ops = {
25465+static const struct ata_port_operations nv_ck804_ops = {
25466 .inherits = &nv_generic_ops,
25467 .freeze = nv_ck804_freeze,
25468 .thaw = nv_ck804_thaw,
25469 .host_stop = nv_ck804_host_stop,
25470 };
25471
25472-static struct ata_port_operations nv_adma_ops = {
25473+static const struct ata_port_operations nv_adma_ops = {
25474 .inherits = &nv_ck804_ops,
25475
25476 .check_atapi_dma = nv_adma_check_atapi_dma,
25477@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25478 .host_stop = nv_adma_host_stop,
25479 };
25480
25481-static struct ata_port_operations nv_swncq_ops = {
25482+static const struct ata_port_operations nv_swncq_ops = {
25483 .inherits = &nv_generic_ops,
25484
25485 .qc_defer = ata_std_qc_defer,
25486diff -urNp linux-2.6.32.42/drivers/ata/sata_promise.c linux-2.6.32.42/drivers/ata/sata_promise.c
25487--- linux-2.6.32.42/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25488+++ linux-2.6.32.42/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25489@@ -195,7 +195,7 @@ static const struct ata_port_operations
25490 .error_handler = pdc_error_handler,
25491 };
25492
25493-static struct ata_port_operations pdc_sata_ops = {
25494+static const struct ata_port_operations pdc_sata_ops = {
25495 .inherits = &pdc_common_ops,
25496 .cable_detect = pdc_sata_cable_detect,
25497 .freeze = pdc_sata_freeze,
25498@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25499
25500 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25501 and ->freeze/thaw that ignore the hotplug controls. */
25502-static struct ata_port_operations pdc_old_sata_ops = {
25503+static const struct ata_port_operations pdc_old_sata_ops = {
25504 .inherits = &pdc_sata_ops,
25505 .freeze = pdc_freeze,
25506 .thaw = pdc_thaw,
25507 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25508 };
25509
25510-static struct ata_port_operations pdc_pata_ops = {
25511+static const struct ata_port_operations pdc_pata_ops = {
25512 .inherits = &pdc_common_ops,
25513 .cable_detect = pdc_pata_cable_detect,
25514 .freeze = pdc_freeze,
25515diff -urNp linux-2.6.32.42/drivers/ata/sata_qstor.c linux-2.6.32.42/drivers/ata/sata_qstor.c
25516--- linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25517+++ linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25518@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25519 .dma_boundary = QS_DMA_BOUNDARY,
25520 };
25521
25522-static struct ata_port_operations qs_ata_ops = {
25523+static const struct ata_port_operations qs_ata_ops = {
25524 .inherits = &ata_sff_port_ops,
25525
25526 .check_atapi_dma = qs_check_atapi_dma,
25527diff -urNp linux-2.6.32.42/drivers/ata/sata_sil24.c linux-2.6.32.42/drivers/ata/sata_sil24.c
25528--- linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25529+++ linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25530@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25531 .dma_boundary = ATA_DMA_BOUNDARY,
25532 };
25533
25534-static struct ata_port_operations sil24_ops = {
25535+static const struct ata_port_operations sil24_ops = {
25536 .inherits = &sata_pmp_port_ops,
25537
25538 .qc_defer = sil24_qc_defer,
25539diff -urNp linux-2.6.32.42/drivers/ata/sata_sil.c linux-2.6.32.42/drivers/ata/sata_sil.c
25540--- linux-2.6.32.42/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25541+++ linux-2.6.32.42/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25542@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25543 .sg_tablesize = ATA_MAX_PRD
25544 };
25545
25546-static struct ata_port_operations sil_ops = {
25547+static const struct ata_port_operations sil_ops = {
25548 .inherits = &ata_bmdma32_port_ops,
25549 .dev_config = sil_dev_config,
25550 .set_mode = sil_set_mode,
25551diff -urNp linux-2.6.32.42/drivers/ata/sata_sis.c linux-2.6.32.42/drivers/ata/sata_sis.c
25552--- linux-2.6.32.42/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25553+++ linux-2.6.32.42/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25554@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25555 ATA_BMDMA_SHT(DRV_NAME),
25556 };
25557
25558-static struct ata_port_operations sis_ops = {
25559+static const struct ata_port_operations sis_ops = {
25560 .inherits = &ata_bmdma_port_ops,
25561 .scr_read = sis_scr_read,
25562 .scr_write = sis_scr_write,
25563diff -urNp linux-2.6.32.42/drivers/ata/sata_svw.c linux-2.6.32.42/drivers/ata/sata_svw.c
25564--- linux-2.6.32.42/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25565+++ linux-2.6.32.42/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25566@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25567 };
25568
25569
25570-static struct ata_port_operations k2_sata_ops = {
25571+static const struct ata_port_operations k2_sata_ops = {
25572 .inherits = &ata_bmdma_port_ops,
25573 .sff_tf_load = k2_sata_tf_load,
25574 .sff_tf_read = k2_sata_tf_read,
25575diff -urNp linux-2.6.32.42/drivers/ata/sata_sx4.c linux-2.6.32.42/drivers/ata/sata_sx4.c
25576--- linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25577+++ linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25578@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25579 };
25580
25581 /* TODO: inherit from base port_ops after converting to new EH */
25582-static struct ata_port_operations pdc_20621_ops = {
25583+static const struct ata_port_operations pdc_20621_ops = {
25584 .inherits = &ata_sff_port_ops,
25585
25586 .check_atapi_dma = pdc_check_atapi_dma,
25587diff -urNp linux-2.6.32.42/drivers/ata/sata_uli.c linux-2.6.32.42/drivers/ata/sata_uli.c
25588--- linux-2.6.32.42/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25589+++ linux-2.6.32.42/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25590@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25591 ATA_BMDMA_SHT(DRV_NAME),
25592 };
25593
25594-static struct ata_port_operations uli_ops = {
25595+static const struct ata_port_operations uli_ops = {
25596 .inherits = &ata_bmdma_port_ops,
25597 .scr_read = uli_scr_read,
25598 .scr_write = uli_scr_write,
25599diff -urNp linux-2.6.32.42/drivers/ata/sata_via.c linux-2.6.32.42/drivers/ata/sata_via.c
25600--- linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25601+++ linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25602@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25603 ATA_BMDMA_SHT(DRV_NAME),
25604 };
25605
25606-static struct ata_port_operations svia_base_ops = {
25607+static const struct ata_port_operations svia_base_ops = {
25608 .inherits = &ata_bmdma_port_ops,
25609 .sff_tf_load = svia_tf_load,
25610 };
25611
25612-static struct ata_port_operations vt6420_sata_ops = {
25613+static const struct ata_port_operations vt6420_sata_ops = {
25614 .inherits = &svia_base_ops,
25615 .freeze = svia_noop_freeze,
25616 .prereset = vt6420_prereset,
25617 .bmdma_start = vt6420_bmdma_start,
25618 };
25619
25620-static struct ata_port_operations vt6421_pata_ops = {
25621+static const struct ata_port_operations vt6421_pata_ops = {
25622 .inherits = &svia_base_ops,
25623 .cable_detect = vt6421_pata_cable_detect,
25624 .set_piomode = vt6421_set_pio_mode,
25625 .set_dmamode = vt6421_set_dma_mode,
25626 };
25627
25628-static struct ata_port_operations vt6421_sata_ops = {
25629+static const struct ata_port_operations vt6421_sata_ops = {
25630 .inherits = &svia_base_ops,
25631 .scr_read = svia_scr_read,
25632 .scr_write = svia_scr_write,
25633 };
25634
25635-static struct ata_port_operations vt8251_ops = {
25636+static const struct ata_port_operations vt8251_ops = {
25637 .inherits = &svia_base_ops,
25638 .hardreset = sata_std_hardreset,
25639 .scr_read = vt8251_scr_read,
25640diff -urNp linux-2.6.32.42/drivers/ata/sata_vsc.c linux-2.6.32.42/drivers/ata/sata_vsc.c
25641--- linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25642+++ linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25643@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25644 };
25645
25646
25647-static struct ata_port_operations vsc_sata_ops = {
25648+static const struct ata_port_operations vsc_sata_ops = {
25649 .inherits = &ata_bmdma_port_ops,
25650 /* The IRQ handling is not quite standard SFF behaviour so we
25651 cannot use the default lost interrupt handler */
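
The run of hunks above (pata_triflex through sata_vsc) only adds const to each driver's ata_port_operations table. The apparent goal is the usual grsecurity constification: a table of function pointers that never changes after initialization is moved into read-only memory, so a runtime memory-corruption bug cannot be used to redirect the driver's entry points. A minimal standalone sketch of the idea, with made-up names (demo_ops and friends are not from the patch):

/* Standalone sketch: a const table of function pointers ends up in a
 * read-only section, so overwriting its entries at runtime faults instead
 * of silently hijacking the driver's callbacks. Illustrative names only. */
#include <stdio.h>

struct demo_ops {
    int (*read)(int arg);
    int (*write)(int arg);
};

static int demo_read(int arg)  { return arg + 1; }
static int demo_write(int arg) { return arg - 1; }

/* 'const' lets the linker place the table in .rodata. */
static const struct demo_ops demo_port_ops = {
    .read  = demo_read,
    .write = demo_write,
};

int main(void)
{
    printf("%d %d\n", demo_port_ops.read(41), demo_port_ops.write(43));
    /* demo_port_ops.read = NULL;  -- rejected by the compiler:
     * assignment of member in read-only object */
    return 0;
}

Doing this kernel-wide also requires the libata core to accept const pointers to these tables, which is presumably why every driver defining one is touched in the same pass.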
25652diff -urNp linux-2.6.32.42/drivers/atm/adummy.c linux-2.6.32.42/drivers/atm/adummy.c
25653--- linux-2.6.32.42/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25654+++ linux-2.6.32.42/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25655@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25656 vcc->pop(vcc, skb);
25657 else
25658 dev_kfree_skb_any(skb);
25659- atomic_inc(&vcc->stats->tx);
25660+ atomic_inc_unchecked(&vcc->stats->tx);
25661
25662 return 0;
25663 }
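
The adummy.c hunk above is the first of a long series of ATM driver changes that replace atomic_inc()/atomic_add() on the vcc->stats counters with their _unchecked counterparts. Under PaX's REFCOUNT hardening the ordinary atomic helpers detect and refuse signed overflow, which is the behaviour wanted for reference counts; pure statistics counters like these are allowed to wrap, so they are opted out via the unchecked variants. A rough userspace analogy of the two behaviours (this is not the PaX implementation, just an illustration):

/* Rough analogy of checked vs. unchecked counters; names are invented. */
#include <limits.h>
#include <stdio.h>

/* Refcount-style counter: treat an increment past INT_MAX as a bug. */
static int checked_inc(int *v)
{
    if (*v == INT_MAX)
        return -1;          /* refuse, like a protected refcount would */
    (*v)++;
    return 0;
}

/* Statistics counter: wrapping is harmless and tolerated. */
static void unchecked_inc(int *v)
{
    *v = (int)((unsigned int)*v + 1u);  /* wraps via unsigned arithmetic */
}

int main(void)
{
    int stat = INT_MAX, ref = INT_MAX;

    unchecked_inc(&stat);               /* statistics: wrap and move on */
    printf("stat wrapped to %d\n", stat);

    if (checked_inc(&ref))              /* refcount: overflow is an error */
        printf("refcount increment refused at INT_MAX\n");
    return 0;
}

The same substitution repeats, hunk after hunk, through ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar, solos-pci, suni, uPD98402 and zatm below.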
25664diff -urNp linux-2.6.32.42/drivers/atm/ambassador.c linux-2.6.32.42/drivers/atm/ambassador.c
25665--- linux-2.6.32.42/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25666+++ linux-2.6.32.42/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25667@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25668 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25669
25670 // VC layer stats
25671- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25672+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25673
25674 // free the descriptor
25675 kfree (tx_descr);
25676@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25677 dump_skb ("<<<", vc, skb);
25678
25679 // VC layer stats
25680- atomic_inc(&atm_vcc->stats->rx);
25681+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25682 __net_timestamp(skb);
25683 // end of our responsability
25684 atm_vcc->push (atm_vcc, skb);
25685@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25686 } else {
25687 PRINTK (KERN_INFO, "dropped over-size frame");
25688 // should we count this?
25689- atomic_inc(&atm_vcc->stats->rx_drop);
25690+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25691 }
25692
25693 } else {
25694@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25695 }
25696
25697 if (check_area (skb->data, skb->len)) {
25698- atomic_inc(&atm_vcc->stats->tx_err);
25699+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25700 return -ENOMEM; // ?
25701 }
25702
25703diff -urNp linux-2.6.32.42/drivers/atm/atmtcp.c linux-2.6.32.42/drivers/atm/atmtcp.c
25704--- linux-2.6.32.42/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25705+++ linux-2.6.32.42/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25706@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25707 if (vcc->pop) vcc->pop(vcc,skb);
25708 else dev_kfree_skb(skb);
25709 if (dev_data) return 0;
25710- atomic_inc(&vcc->stats->tx_err);
25711+ atomic_inc_unchecked(&vcc->stats->tx_err);
25712 return -ENOLINK;
25713 }
25714 size = skb->len+sizeof(struct atmtcp_hdr);
25715@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25716 if (!new_skb) {
25717 if (vcc->pop) vcc->pop(vcc,skb);
25718 else dev_kfree_skb(skb);
25719- atomic_inc(&vcc->stats->tx_err);
25720+ atomic_inc_unchecked(&vcc->stats->tx_err);
25721 return -ENOBUFS;
25722 }
25723 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25724@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25725 if (vcc->pop) vcc->pop(vcc,skb);
25726 else dev_kfree_skb(skb);
25727 out_vcc->push(out_vcc,new_skb);
25728- atomic_inc(&vcc->stats->tx);
25729- atomic_inc(&out_vcc->stats->rx);
25730+ atomic_inc_unchecked(&vcc->stats->tx);
25731+ atomic_inc_unchecked(&out_vcc->stats->rx);
25732 return 0;
25733 }
25734
25735@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25736 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25737 read_unlock(&vcc_sklist_lock);
25738 if (!out_vcc) {
25739- atomic_inc(&vcc->stats->tx_err);
25740+ atomic_inc_unchecked(&vcc->stats->tx_err);
25741 goto done;
25742 }
25743 skb_pull(skb,sizeof(struct atmtcp_hdr));
25744@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25745 __net_timestamp(new_skb);
25746 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25747 out_vcc->push(out_vcc,new_skb);
25748- atomic_inc(&vcc->stats->tx);
25749- atomic_inc(&out_vcc->stats->rx);
25750+ atomic_inc_unchecked(&vcc->stats->tx);
25751+ atomic_inc_unchecked(&out_vcc->stats->rx);
25752 done:
25753 if (vcc->pop) vcc->pop(vcc,skb);
25754 else dev_kfree_skb(skb);
25755diff -urNp linux-2.6.32.42/drivers/atm/eni.c linux-2.6.32.42/drivers/atm/eni.c
25756--- linux-2.6.32.42/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25757+++ linux-2.6.32.42/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25758@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25759 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25760 vcc->dev->number);
25761 length = 0;
25762- atomic_inc(&vcc->stats->rx_err);
25763+ atomic_inc_unchecked(&vcc->stats->rx_err);
25764 }
25765 else {
25766 length = ATM_CELL_SIZE-1; /* no HEC */
25767@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25768 size);
25769 }
25770 eff = length = 0;
25771- atomic_inc(&vcc->stats->rx_err);
25772+ atomic_inc_unchecked(&vcc->stats->rx_err);
25773 }
25774 else {
25775 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25776@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25777 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25778 vcc->dev->number,vcc->vci,length,size << 2,descr);
25779 length = eff = 0;
25780- atomic_inc(&vcc->stats->rx_err);
25781+ atomic_inc_unchecked(&vcc->stats->rx_err);
25782 }
25783 }
25784 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25785@@ -770,7 +770,7 @@ rx_dequeued++;
25786 vcc->push(vcc,skb);
25787 pushed++;
25788 }
25789- atomic_inc(&vcc->stats->rx);
25790+ atomic_inc_unchecked(&vcc->stats->rx);
25791 }
25792 wake_up(&eni_dev->rx_wait);
25793 }
25794@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25795 PCI_DMA_TODEVICE);
25796 if (vcc->pop) vcc->pop(vcc,skb);
25797 else dev_kfree_skb_irq(skb);
25798- atomic_inc(&vcc->stats->tx);
25799+ atomic_inc_unchecked(&vcc->stats->tx);
25800 wake_up(&eni_dev->tx_wait);
25801 dma_complete++;
25802 }
25803diff -urNp linux-2.6.32.42/drivers/atm/firestream.c linux-2.6.32.42/drivers/atm/firestream.c
25804--- linux-2.6.32.42/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25805+++ linux-2.6.32.42/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25806@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25807 }
25808 }
25809
25810- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25811+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25812
25813 fs_dprintk (FS_DEBUG_TXMEM, "i");
25814 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25815@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25816 #endif
25817 skb_put (skb, qe->p1 & 0xffff);
25818 ATM_SKB(skb)->vcc = atm_vcc;
25819- atomic_inc(&atm_vcc->stats->rx);
25820+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25821 __net_timestamp(skb);
25822 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25823 atm_vcc->push (atm_vcc, skb);
25824@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25825 kfree (pe);
25826 }
25827 if (atm_vcc)
25828- atomic_inc(&atm_vcc->stats->rx_drop);
25829+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25830 break;
25831 case 0x1f: /* Reassembly abort: no buffers. */
25832 /* Silently increment error counter. */
25833 if (atm_vcc)
25834- atomic_inc(&atm_vcc->stats->rx_drop);
25835+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25836 break;
25837 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25838 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25839diff -urNp linux-2.6.32.42/drivers/atm/fore200e.c linux-2.6.32.42/drivers/atm/fore200e.c
25840--- linux-2.6.32.42/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25841+++ linux-2.6.32.42/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25842@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25843 #endif
25844 /* check error condition */
25845 if (*entry->status & STATUS_ERROR)
25846- atomic_inc(&vcc->stats->tx_err);
25847+ atomic_inc_unchecked(&vcc->stats->tx_err);
25848 else
25849- atomic_inc(&vcc->stats->tx);
25850+ atomic_inc_unchecked(&vcc->stats->tx);
25851 }
25852 }
25853
25854@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25855 if (skb == NULL) {
25856 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25857
25858- atomic_inc(&vcc->stats->rx_drop);
25859+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25860 return -ENOMEM;
25861 }
25862
25863@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25864
25865 dev_kfree_skb_any(skb);
25866
25867- atomic_inc(&vcc->stats->rx_drop);
25868+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25869 return -ENOMEM;
25870 }
25871
25872 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25873
25874 vcc->push(vcc, skb);
25875- atomic_inc(&vcc->stats->rx);
25876+ atomic_inc_unchecked(&vcc->stats->rx);
25877
25878 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25879
25880@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25881 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25882 fore200e->atm_dev->number,
25883 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25884- atomic_inc(&vcc->stats->rx_err);
25885+ atomic_inc_unchecked(&vcc->stats->rx_err);
25886 }
25887 }
25888
25889@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25890 goto retry_here;
25891 }
25892
25893- atomic_inc(&vcc->stats->tx_err);
25894+ atomic_inc_unchecked(&vcc->stats->tx_err);
25895
25896 fore200e->tx_sat++;
25897 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25898diff -urNp linux-2.6.32.42/drivers/atm/he.c linux-2.6.32.42/drivers/atm/he.c
25899--- linux-2.6.32.42/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25900+++ linux-2.6.32.42/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25901@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25902
25903 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25904 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25905- atomic_inc(&vcc->stats->rx_drop);
25906+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25907 goto return_host_buffers;
25908 }
25909
25910@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25911 RBRQ_LEN_ERR(he_dev->rbrq_head)
25912 ? "LEN_ERR" : "",
25913 vcc->vpi, vcc->vci);
25914- atomic_inc(&vcc->stats->rx_err);
25915+ atomic_inc_unchecked(&vcc->stats->rx_err);
25916 goto return_host_buffers;
25917 }
25918
25919@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25920 vcc->push(vcc, skb);
25921 spin_lock(&he_dev->global_lock);
25922
25923- atomic_inc(&vcc->stats->rx);
25924+ atomic_inc_unchecked(&vcc->stats->rx);
25925
25926 return_host_buffers:
25927 ++pdus_assembled;
25928@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25929 tpd->vcc->pop(tpd->vcc, tpd->skb);
25930 else
25931 dev_kfree_skb_any(tpd->skb);
25932- atomic_inc(&tpd->vcc->stats->tx_err);
25933+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25934 }
25935 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25936 return;
25937@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25938 vcc->pop(vcc, skb);
25939 else
25940 dev_kfree_skb_any(skb);
25941- atomic_inc(&vcc->stats->tx_err);
25942+ atomic_inc_unchecked(&vcc->stats->tx_err);
25943 return -EINVAL;
25944 }
25945
25946@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25947 vcc->pop(vcc, skb);
25948 else
25949 dev_kfree_skb_any(skb);
25950- atomic_inc(&vcc->stats->tx_err);
25951+ atomic_inc_unchecked(&vcc->stats->tx_err);
25952 return -EINVAL;
25953 }
25954 #endif
25955@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25956 vcc->pop(vcc, skb);
25957 else
25958 dev_kfree_skb_any(skb);
25959- atomic_inc(&vcc->stats->tx_err);
25960+ atomic_inc_unchecked(&vcc->stats->tx_err);
25961 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25962 return -ENOMEM;
25963 }
25964@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25965 vcc->pop(vcc, skb);
25966 else
25967 dev_kfree_skb_any(skb);
25968- atomic_inc(&vcc->stats->tx_err);
25969+ atomic_inc_unchecked(&vcc->stats->tx_err);
25970 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25971 return -ENOMEM;
25972 }
25973@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25974 __enqueue_tpd(he_dev, tpd, cid);
25975 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25976
25977- atomic_inc(&vcc->stats->tx);
25978+ atomic_inc_unchecked(&vcc->stats->tx);
25979
25980 return 0;
25981 }
25982diff -urNp linux-2.6.32.42/drivers/atm/horizon.c linux-2.6.32.42/drivers/atm/horizon.c
25983--- linux-2.6.32.42/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25984+++ linux-2.6.32.42/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25985@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25986 {
25987 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25988 // VC layer stats
25989- atomic_inc(&vcc->stats->rx);
25990+ atomic_inc_unchecked(&vcc->stats->rx);
25991 __net_timestamp(skb);
25992 // end of our responsability
25993 vcc->push (vcc, skb);
25994@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25995 dev->tx_iovec = NULL;
25996
25997 // VC layer stats
25998- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25999+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26000
26001 // free the skb
26002 hrz_kfree_skb (skb);
26003diff -urNp linux-2.6.32.42/drivers/atm/idt77252.c linux-2.6.32.42/drivers/atm/idt77252.c
26004--- linux-2.6.32.42/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26005+++ linux-2.6.32.42/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26006@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26007 else
26008 dev_kfree_skb(skb);
26009
26010- atomic_inc(&vcc->stats->tx);
26011+ atomic_inc_unchecked(&vcc->stats->tx);
26012 }
26013
26014 atomic_dec(&scq->used);
26015@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26016 if ((sb = dev_alloc_skb(64)) == NULL) {
26017 printk("%s: Can't allocate buffers for aal0.\n",
26018 card->name);
26019- atomic_add(i, &vcc->stats->rx_drop);
26020+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26021 break;
26022 }
26023 if (!atm_charge(vcc, sb->truesize)) {
26024 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26025 card->name);
26026- atomic_add(i - 1, &vcc->stats->rx_drop);
26027+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26028 dev_kfree_skb(sb);
26029 break;
26030 }
26031@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26032 ATM_SKB(sb)->vcc = vcc;
26033 __net_timestamp(sb);
26034 vcc->push(vcc, sb);
26035- atomic_inc(&vcc->stats->rx);
26036+ atomic_inc_unchecked(&vcc->stats->rx);
26037
26038 cell += ATM_CELL_PAYLOAD;
26039 }
26040@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26041 "(CDC: %08x)\n",
26042 card->name, len, rpp->len, readl(SAR_REG_CDC));
26043 recycle_rx_pool_skb(card, rpp);
26044- atomic_inc(&vcc->stats->rx_err);
26045+ atomic_inc_unchecked(&vcc->stats->rx_err);
26046 return;
26047 }
26048 if (stat & SAR_RSQE_CRC) {
26049 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26050 recycle_rx_pool_skb(card, rpp);
26051- atomic_inc(&vcc->stats->rx_err);
26052+ atomic_inc_unchecked(&vcc->stats->rx_err);
26053 return;
26054 }
26055 if (skb_queue_len(&rpp->queue) > 1) {
26056@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26057 RXPRINTK("%s: Can't alloc RX skb.\n",
26058 card->name);
26059 recycle_rx_pool_skb(card, rpp);
26060- atomic_inc(&vcc->stats->rx_err);
26061+ atomic_inc_unchecked(&vcc->stats->rx_err);
26062 return;
26063 }
26064 if (!atm_charge(vcc, skb->truesize)) {
26065@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26066 __net_timestamp(skb);
26067
26068 vcc->push(vcc, skb);
26069- atomic_inc(&vcc->stats->rx);
26070+ atomic_inc_unchecked(&vcc->stats->rx);
26071
26072 return;
26073 }
26074@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26075 __net_timestamp(skb);
26076
26077 vcc->push(vcc, skb);
26078- atomic_inc(&vcc->stats->rx);
26079+ atomic_inc_unchecked(&vcc->stats->rx);
26080
26081 if (skb->truesize > SAR_FB_SIZE_3)
26082 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26083@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26084 if (vcc->qos.aal != ATM_AAL0) {
26085 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26086 card->name, vpi, vci);
26087- atomic_inc(&vcc->stats->rx_drop);
26088+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26089 goto drop;
26090 }
26091
26092 if ((sb = dev_alloc_skb(64)) == NULL) {
26093 printk("%s: Can't allocate buffers for AAL0.\n",
26094 card->name);
26095- atomic_inc(&vcc->stats->rx_err);
26096+ atomic_inc_unchecked(&vcc->stats->rx_err);
26097 goto drop;
26098 }
26099
26100@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26101 ATM_SKB(sb)->vcc = vcc;
26102 __net_timestamp(sb);
26103 vcc->push(vcc, sb);
26104- atomic_inc(&vcc->stats->rx);
26105+ atomic_inc_unchecked(&vcc->stats->rx);
26106
26107 drop:
26108 skb_pull(queue, 64);
26109@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26110
26111 if (vc == NULL) {
26112 printk("%s: NULL connection in send().\n", card->name);
26113- atomic_inc(&vcc->stats->tx_err);
26114+ atomic_inc_unchecked(&vcc->stats->tx_err);
26115 dev_kfree_skb(skb);
26116 return -EINVAL;
26117 }
26118 if (!test_bit(VCF_TX, &vc->flags)) {
26119 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26120- atomic_inc(&vcc->stats->tx_err);
26121+ atomic_inc_unchecked(&vcc->stats->tx_err);
26122 dev_kfree_skb(skb);
26123 return -EINVAL;
26124 }
26125@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26126 break;
26127 default:
26128 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26129- atomic_inc(&vcc->stats->tx_err);
26130+ atomic_inc_unchecked(&vcc->stats->tx_err);
26131 dev_kfree_skb(skb);
26132 return -EINVAL;
26133 }
26134
26135 if (skb_shinfo(skb)->nr_frags != 0) {
26136 printk("%s: No scatter-gather yet.\n", card->name);
26137- atomic_inc(&vcc->stats->tx_err);
26138+ atomic_inc_unchecked(&vcc->stats->tx_err);
26139 dev_kfree_skb(skb);
26140 return -EINVAL;
26141 }
26142@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26143
26144 err = queue_skb(card, vc, skb, oam);
26145 if (err) {
26146- atomic_inc(&vcc->stats->tx_err);
26147+ atomic_inc_unchecked(&vcc->stats->tx_err);
26148 dev_kfree_skb(skb);
26149 return err;
26150 }
26151@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26152 skb = dev_alloc_skb(64);
26153 if (!skb) {
26154 printk("%s: Out of memory in send_oam().\n", card->name);
26155- atomic_inc(&vcc->stats->tx_err);
26156+ atomic_inc_unchecked(&vcc->stats->tx_err);
26157 return -ENOMEM;
26158 }
26159 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26160diff -urNp linux-2.6.32.42/drivers/atm/iphase.c linux-2.6.32.42/drivers/atm/iphase.c
26161--- linux-2.6.32.42/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26162+++ linux-2.6.32.42/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26163@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26164 status = (u_short) (buf_desc_ptr->desc_mode);
26165 if (status & (RX_CER | RX_PTE | RX_OFL))
26166 {
26167- atomic_inc(&vcc->stats->rx_err);
26168+ atomic_inc_unchecked(&vcc->stats->rx_err);
26169 IF_ERR(printk("IA: bad packet, dropping it");)
26170 if (status & RX_CER) {
26171 IF_ERR(printk(" cause: packet CRC error\n");)
26172@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26173 len = dma_addr - buf_addr;
26174 if (len > iadev->rx_buf_sz) {
26175 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26176- atomic_inc(&vcc->stats->rx_err);
26177+ atomic_inc_unchecked(&vcc->stats->rx_err);
26178 goto out_free_desc;
26179 }
26180
26181@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26182 ia_vcc = INPH_IA_VCC(vcc);
26183 if (ia_vcc == NULL)
26184 {
26185- atomic_inc(&vcc->stats->rx_err);
26186+ atomic_inc_unchecked(&vcc->stats->rx_err);
26187 dev_kfree_skb_any(skb);
26188 atm_return(vcc, atm_guess_pdu2truesize(len));
26189 goto INCR_DLE;
26190@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26191 if ((length > iadev->rx_buf_sz) || (length >
26192 (skb->len - sizeof(struct cpcs_trailer))))
26193 {
26194- atomic_inc(&vcc->stats->rx_err);
26195+ atomic_inc_unchecked(&vcc->stats->rx_err);
26196 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26197 length, skb->len);)
26198 dev_kfree_skb_any(skb);
26199@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26200
26201 IF_RX(printk("rx_dle_intr: skb push");)
26202 vcc->push(vcc,skb);
26203- atomic_inc(&vcc->stats->rx);
26204+ atomic_inc_unchecked(&vcc->stats->rx);
26205 iadev->rx_pkt_cnt++;
26206 }
26207 INCR_DLE:
26208@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26209 {
26210 struct k_sonet_stats *stats;
26211 stats = &PRIV(_ia_dev[board])->sonet_stats;
26212- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26213- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26214- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26215- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26216- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26217- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26218- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26219- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26220- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26221+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26222+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26223+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26224+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26225+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26226+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26227+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26228+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26229+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26230 }
26231 ia_cmds.status = 0;
26232 break;
26233@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26234 if ((desc == 0) || (desc > iadev->num_tx_desc))
26235 {
26236 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26237- atomic_inc(&vcc->stats->tx);
26238+ atomic_inc_unchecked(&vcc->stats->tx);
26239 if (vcc->pop)
26240 vcc->pop(vcc, skb);
26241 else
26242@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26243 ATM_DESC(skb) = vcc->vci;
26244 skb_queue_tail(&iadev->tx_dma_q, skb);
26245
26246- atomic_inc(&vcc->stats->tx);
26247+ atomic_inc_unchecked(&vcc->stats->tx);
26248 iadev->tx_pkt_cnt++;
26249 /* Increment transaction counter */
26250 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26251
26252 #if 0
26253 /* add flow control logic */
26254- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26255+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26256 if (iavcc->vc_desc_cnt > 10) {
26257 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26258 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26259diff -urNp linux-2.6.32.42/drivers/atm/lanai.c linux-2.6.32.42/drivers/atm/lanai.c
26260--- linux-2.6.32.42/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26261+++ linux-2.6.32.42/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26262@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26263 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26264 lanai_endtx(lanai, lvcc);
26265 lanai_free_skb(lvcc->tx.atmvcc, skb);
26266- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26267+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26268 }
26269
26270 /* Try to fill the buffer - don't call unless there is backlog */
26271@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26272 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26273 __net_timestamp(skb);
26274 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26275- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26276+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26277 out:
26278 lvcc->rx.buf.ptr = end;
26279 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26280@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26281 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26282 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26283 lanai->stats.service_rxnotaal5++;
26284- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26285+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26286 return 0;
26287 }
26288 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26289@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26290 int bytes;
26291 read_unlock(&vcc_sklist_lock);
26292 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26293- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26294+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26295 lvcc->stats.x.aal5.service_trash++;
26296 bytes = (SERVICE_GET_END(s) * 16) -
26297 (((unsigned long) lvcc->rx.buf.ptr) -
26298@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26299 }
26300 if (s & SERVICE_STREAM) {
26301 read_unlock(&vcc_sklist_lock);
26302- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26303+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26304 lvcc->stats.x.aal5.service_stream++;
26305 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26306 "PDU on VCI %d!\n", lanai->number, vci);
26307@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26308 return 0;
26309 }
26310 DPRINTK("got rx crc error on vci %d\n", vci);
26311- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26312+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26313 lvcc->stats.x.aal5.service_rxcrc++;
26314 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26315 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26316diff -urNp linux-2.6.32.42/drivers/atm/nicstar.c linux-2.6.32.42/drivers/atm/nicstar.c
26317--- linux-2.6.32.42/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26318+++ linux-2.6.32.42/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26319@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26320 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26321 {
26322 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26323- atomic_inc(&vcc->stats->tx_err);
26324+ atomic_inc_unchecked(&vcc->stats->tx_err);
26325 dev_kfree_skb_any(skb);
26326 return -EINVAL;
26327 }
26328@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26329 if (!vc->tx)
26330 {
26331 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26332- atomic_inc(&vcc->stats->tx_err);
26333+ atomic_inc_unchecked(&vcc->stats->tx_err);
26334 dev_kfree_skb_any(skb);
26335 return -EINVAL;
26336 }
26337@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26338 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26339 {
26340 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26341- atomic_inc(&vcc->stats->tx_err);
26342+ atomic_inc_unchecked(&vcc->stats->tx_err);
26343 dev_kfree_skb_any(skb);
26344 return -EINVAL;
26345 }
26346@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26347 if (skb_shinfo(skb)->nr_frags != 0)
26348 {
26349 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26350- atomic_inc(&vcc->stats->tx_err);
26351+ atomic_inc_unchecked(&vcc->stats->tx_err);
26352 dev_kfree_skb_any(skb);
26353 return -EINVAL;
26354 }
26355@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26356
26357 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26358 {
26359- atomic_inc(&vcc->stats->tx_err);
26360+ atomic_inc_unchecked(&vcc->stats->tx_err);
26361 dev_kfree_skb_any(skb);
26362 return -EIO;
26363 }
26364- atomic_inc(&vcc->stats->tx);
26365+ atomic_inc_unchecked(&vcc->stats->tx);
26366
26367 return 0;
26368 }
26369@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26370 {
26371 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26372 card->index);
26373- atomic_add(i,&vcc->stats->rx_drop);
26374+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26375 break;
26376 }
26377 if (!atm_charge(vcc, sb->truesize))
26378 {
26379 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26380 card->index);
26381- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26382+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26383 dev_kfree_skb_any(sb);
26384 break;
26385 }
26386@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26387 ATM_SKB(sb)->vcc = vcc;
26388 __net_timestamp(sb);
26389 vcc->push(vcc, sb);
26390- atomic_inc(&vcc->stats->rx);
26391+ atomic_inc_unchecked(&vcc->stats->rx);
26392 cell += ATM_CELL_PAYLOAD;
26393 }
26394
26395@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26396 if (iovb == NULL)
26397 {
26398 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26399- atomic_inc(&vcc->stats->rx_drop);
26400+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26401 recycle_rx_buf(card, skb);
26402 return;
26403 }
26404@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26405 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26406 {
26407 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26408- atomic_inc(&vcc->stats->rx_err);
26409+ atomic_inc_unchecked(&vcc->stats->rx_err);
26410 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26411 NS_SKB(iovb)->iovcnt = 0;
26412 iovb->len = 0;
26413@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26414 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26415 card->index);
26416 which_list(card, skb);
26417- atomic_inc(&vcc->stats->rx_err);
26418+ atomic_inc_unchecked(&vcc->stats->rx_err);
26419 recycle_rx_buf(card, skb);
26420 vc->rx_iov = NULL;
26421 recycle_iov_buf(card, iovb);
26422@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26423 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26424 card->index);
26425 which_list(card, skb);
26426- atomic_inc(&vcc->stats->rx_err);
26427+ atomic_inc_unchecked(&vcc->stats->rx_err);
26428 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26429 NS_SKB(iovb)->iovcnt);
26430 vc->rx_iov = NULL;
26431@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26432 printk(" - PDU size mismatch.\n");
26433 else
26434 printk(".\n");
26435- atomic_inc(&vcc->stats->rx_err);
26436+ atomic_inc_unchecked(&vcc->stats->rx_err);
26437 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26438 NS_SKB(iovb)->iovcnt);
26439 vc->rx_iov = NULL;
26440@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26441 if (!atm_charge(vcc, skb->truesize))
26442 {
26443 push_rxbufs(card, skb);
26444- atomic_inc(&vcc->stats->rx_drop);
26445+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26446 }
26447 else
26448 {
26449@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26450 ATM_SKB(skb)->vcc = vcc;
26451 __net_timestamp(skb);
26452 vcc->push(vcc, skb);
26453- atomic_inc(&vcc->stats->rx);
26454+ atomic_inc_unchecked(&vcc->stats->rx);
26455 }
26456 }
26457 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26458@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26459 if (!atm_charge(vcc, sb->truesize))
26460 {
26461 push_rxbufs(card, sb);
26462- atomic_inc(&vcc->stats->rx_drop);
26463+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26464 }
26465 else
26466 {
26467@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26468 ATM_SKB(sb)->vcc = vcc;
26469 __net_timestamp(sb);
26470 vcc->push(vcc, sb);
26471- atomic_inc(&vcc->stats->rx);
26472+ atomic_inc_unchecked(&vcc->stats->rx);
26473 }
26474
26475 push_rxbufs(card, skb);
26476@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26477 if (!atm_charge(vcc, skb->truesize))
26478 {
26479 push_rxbufs(card, skb);
26480- atomic_inc(&vcc->stats->rx_drop);
26481+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26482 }
26483 else
26484 {
26485@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26486 ATM_SKB(skb)->vcc = vcc;
26487 __net_timestamp(skb);
26488 vcc->push(vcc, skb);
26489- atomic_inc(&vcc->stats->rx);
26490+ atomic_inc_unchecked(&vcc->stats->rx);
26491 }
26492
26493 push_rxbufs(card, sb);
26494@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26495 if (hb == NULL)
26496 {
26497 printk("nicstar%d: Out of huge buffers.\n", card->index);
26498- atomic_inc(&vcc->stats->rx_drop);
26499+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26500 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26501 NS_SKB(iovb)->iovcnt);
26502 vc->rx_iov = NULL;
26503@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26504 }
26505 else
26506 dev_kfree_skb_any(hb);
26507- atomic_inc(&vcc->stats->rx_drop);
26508+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26509 }
26510 else
26511 {
26512@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26513 #endif /* NS_USE_DESTRUCTORS */
26514 __net_timestamp(hb);
26515 vcc->push(vcc, hb);
26516- atomic_inc(&vcc->stats->rx);
26517+ atomic_inc_unchecked(&vcc->stats->rx);
26518 }
26519 }
26520
26521diff -urNp linux-2.6.32.42/drivers/atm/solos-pci.c linux-2.6.32.42/drivers/atm/solos-pci.c
26522--- linux-2.6.32.42/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26523+++ linux-2.6.32.42/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26524@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26525 }
26526 atm_charge(vcc, skb->truesize);
26527 vcc->push(vcc, skb);
26528- atomic_inc(&vcc->stats->rx);
26529+ atomic_inc_unchecked(&vcc->stats->rx);
26530 break;
26531
26532 case PKT_STATUS:
26533@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26534 char msg[500];
26535 char item[10];
26536
26537+ pax_track_stack();
26538+
26539 len = buf->len;
26540 for (i = 0; i < len; i++){
26541 if(i % 8 == 0)
26542@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26543 vcc = SKB_CB(oldskb)->vcc;
26544
26545 if (vcc) {
26546- atomic_inc(&vcc->stats->tx);
26547+ atomic_inc_unchecked(&vcc->stats->tx);
26548 solos_pop(vcc, oldskb);
26549 } else
26550 dev_kfree_skb_irq(oldskb);
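
solos-pci's print_buffer() above keeps a 500-byte msg[] plus item[] on the stack, and the patch adds a pax_track_stack() call at the top of it (the same call shows up below in cpqarray, DAC960 and nbd). The call appears to belong to PaX's kernel stack sanitization: it records how deep such large frames reach so the used portion of the stack can be scrubbed before returning to userspace, and it is expected to compile away to nothing when the feature is disabled. A compilable sketch of the calling pattern only, with the hook stubbed out (dump_hex and the stub macro are illustrative, not the real definitions from this patch):

#include <stdio.h>
#include <string.h>

/* Stand-in stub; the real hook is provided elsewhere in the PaX patch. */
#define pax_track_stack() do { } while (0)

static void dump_hex(const unsigned char *buf, size_t len)
{
    char msg[500];              /* large frame, like print_buffer()'s msg[] */
    size_t pos = 0;

    pax_track_stack();          /* first statement, as in the hunk above */

    for (size_t i = 0; i < len && pos + 4 < sizeof(msg); i++)
        pos += (size_t)snprintf(msg + pos, sizeof(msg) - pos, "%02x ", buf[i]);
    msg[pos] = '\0';
    puts(msg);
}

int main(void)
{
    const unsigned char data[] = { 0xde, 0xad, 0xbe, 0xef };
    dump_hex(data, sizeof(data));
    return 0;
}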
26551diff -urNp linux-2.6.32.42/drivers/atm/suni.c linux-2.6.32.42/drivers/atm/suni.c
26552--- linux-2.6.32.42/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26553+++ linux-2.6.32.42/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26554@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26555
26556
26557 #define ADD_LIMITED(s,v) \
26558- atomic_add((v),&stats->s); \
26559- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26560+ atomic_add_unchecked((v),&stats->s); \
26561+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26562
26563
26564 static void suni_hz(unsigned long from_timer)
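
suni.c's ADD_LIMITED macro above deliberately saturates: it adds the hardware count and, if the signed total has gone negative, pins it at INT_MAX. The patch only swaps in the unchecked atomic helpers so this intentional near-overflow arithmetic does not trip the REFCOUNT checks. The clamping idea on its own, in plain C (add_limited here is a hypothetical stand-in, not the kernel macro):

/* Saturating add in the spirit of ADD_LIMITED: stick at INT_MAX instead
 * of wrapping negative. Delta is assumed non-negative, as with GET(). */
#include <limits.h>
#include <stdio.h>

static void add_limited(int *counter, int delta)
{
    if (delta > INT_MAX - *counter)
        *counter = INT_MAX;     /* clamp instead of overflowing */
    else
        *counter += delta;
}

int main(void)
{
    int cells = INT_MAX - 2;

    add_limited(&cells, 10);
    printf("%d\n", cells);      /* prints INT_MAX */
    return 0;
}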
26565diff -urNp linux-2.6.32.42/drivers/atm/uPD98402.c linux-2.6.32.42/drivers/atm/uPD98402.c
26566--- linux-2.6.32.42/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26567+++ linux-2.6.32.42/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26568@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26569 struct sonet_stats tmp;
26570 int error = 0;
26571
26572- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26573+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26574 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26575 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26576 if (zero && !error) {
26577@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26578
26579
26580 #define ADD_LIMITED(s,v) \
26581- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26582- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26583- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26584+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26585+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26586+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26587
26588
26589 static void stat_event(struct atm_dev *dev)
26590@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26591 if (reason & uPD98402_INT_PFM) stat_event(dev);
26592 if (reason & uPD98402_INT_PCO) {
26593 (void) GET(PCOCR); /* clear interrupt cause */
26594- atomic_add(GET(HECCT),
26595+ atomic_add_unchecked(GET(HECCT),
26596 &PRIV(dev)->sonet_stats.uncorr_hcs);
26597 }
26598 if ((reason & uPD98402_INT_RFO) &&
26599@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26600 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26601 uPD98402_INT_LOS),PIMR); /* enable them */
26602 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26603- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26604- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26605- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26606+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26607+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26608+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26609 return 0;
26610 }
26611
26612diff -urNp linux-2.6.32.42/drivers/atm/zatm.c linux-2.6.32.42/drivers/atm/zatm.c
26613--- linux-2.6.32.42/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26614+++ linux-2.6.32.42/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26615@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26616 }
26617 if (!size) {
26618 dev_kfree_skb_irq(skb);
26619- if (vcc) atomic_inc(&vcc->stats->rx_err);
26620+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26621 continue;
26622 }
26623 if (!atm_charge(vcc,skb->truesize)) {
26624@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26625 skb->len = size;
26626 ATM_SKB(skb)->vcc = vcc;
26627 vcc->push(vcc,skb);
26628- atomic_inc(&vcc->stats->rx);
26629+ atomic_inc_unchecked(&vcc->stats->rx);
26630 }
26631 zout(pos & 0xffff,MTA(mbx));
26632 #if 0 /* probably a stupid idea */
26633@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26634 skb_queue_head(&zatm_vcc->backlog,skb);
26635 break;
26636 }
26637- atomic_inc(&vcc->stats->tx);
26638+ atomic_inc_unchecked(&vcc->stats->tx);
26639 wake_up(&zatm_vcc->tx_wait);
26640 }
26641
26642diff -urNp linux-2.6.32.42/drivers/base/bus.c linux-2.6.32.42/drivers/base/bus.c
26643--- linux-2.6.32.42/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26644+++ linux-2.6.32.42/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26645@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26646 return ret;
26647 }
26648
26649-static struct sysfs_ops driver_sysfs_ops = {
26650+static const struct sysfs_ops driver_sysfs_ops = {
26651 .show = drv_attr_show,
26652 .store = drv_attr_store,
26653 };
26654@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26655 return ret;
26656 }
26657
26658-static struct sysfs_ops bus_sysfs_ops = {
26659+static const struct sysfs_ops bus_sysfs_ops = {
26660 .show = bus_attr_show,
26661 .store = bus_attr_store,
26662 };
26663@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26664 return 0;
26665 }
26666
26667-static struct kset_uevent_ops bus_uevent_ops = {
26668+static const struct kset_uevent_ops bus_uevent_ops = {
26669 .filter = bus_uevent_filter,
26670 };
26671
26672diff -urNp linux-2.6.32.42/drivers/base/class.c linux-2.6.32.42/drivers/base/class.c
26673--- linux-2.6.32.42/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26674+++ linux-2.6.32.42/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26675@@ -63,7 +63,7 @@ static void class_release(struct kobject
26676 kfree(cp);
26677 }
26678
26679-static struct sysfs_ops class_sysfs_ops = {
26680+static const struct sysfs_ops class_sysfs_ops = {
26681 .show = class_attr_show,
26682 .store = class_attr_store,
26683 };
26684diff -urNp linux-2.6.32.42/drivers/base/core.c linux-2.6.32.42/drivers/base/core.c
26685--- linux-2.6.32.42/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26686+++ linux-2.6.32.42/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26687@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26688 return ret;
26689 }
26690
26691-static struct sysfs_ops dev_sysfs_ops = {
26692+static const struct sysfs_ops dev_sysfs_ops = {
26693 .show = dev_attr_show,
26694 .store = dev_attr_store,
26695 };
26696@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26697 return retval;
26698 }
26699
26700-static struct kset_uevent_ops device_uevent_ops = {
26701+static const struct kset_uevent_ops device_uevent_ops = {
26702 .filter = dev_uevent_filter,
26703 .name = dev_uevent_name,
26704 .uevent = dev_uevent,
26705diff -urNp linux-2.6.32.42/drivers/base/memory.c linux-2.6.32.42/drivers/base/memory.c
26706--- linux-2.6.32.42/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26707+++ linux-2.6.32.42/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26708@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26709 return retval;
26710 }
26711
26712-static struct kset_uevent_ops memory_uevent_ops = {
26713+static const struct kset_uevent_ops memory_uevent_ops = {
26714 .name = memory_uevent_name,
26715 .uevent = memory_uevent,
26716 };
26717diff -urNp linux-2.6.32.42/drivers/base/sys.c linux-2.6.32.42/drivers/base/sys.c
26718--- linux-2.6.32.42/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26719+++ linux-2.6.32.42/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26720@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26721 return -EIO;
26722 }
26723
26724-static struct sysfs_ops sysfs_ops = {
26725+static const struct sysfs_ops sysfs_ops = {
26726 .show = sysdev_show,
26727 .store = sysdev_store,
26728 };
26729@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26730 return -EIO;
26731 }
26732
26733-static struct sysfs_ops sysfs_class_ops = {
26734+static const struct sysfs_ops sysfs_class_ops = {
26735 .show = sysdev_class_show,
26736 .store = sysdev_class_store,
26737 };
26738diff -urNp linux-2.6.32.42/drivers/block/cciss.c linux-2.6.32.42/drivers/block/cciss.c
26739--- linux-2.6.32.42/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26740+++ linux-2.6.32.42/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26741@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26742 int err;
26743 u32 cp;
26744
26745+ memset(&arg64, 0, sizeof(arg64));
26746+
26747 err = 0;
26748 err |=
26749 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
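
The cciss hunk above zeroes arg64 before the subsequent copy_from_user() calls fill it in field by field. Without the memset, any member or padding byte the copy path does not touch keeps whatever happened to be on the kernel stack, and that stale data can later be copied back out to userspace; clearing the whole structure first closes the leak. The same pre-clearing pattern appears again below for genrtc's RTC_PLL_GET and hpet's info ioctl. A small standalone illustration (struct reply and fill_reply are invented for the example):

/* Why zeroing a partially-filled struct matters: padding and untouched
 * fields otherwise keep whatever was previously on the stack. */
#include <stdio.h>
#include <string.h>

struct reply {
    char cmd;           /* padding bytes typically follow on most ABIs */
    int  status;
    int  reserved;      /* never set individually below */
};

static void fill_reply(struct reply *r, int status)
{
    memset(r, 0, sizeof(*r));   /* the fix: clear everything first */
    r->cmd = 1;
    r->status = status;
    /* r->reserved and the padding are now guaranteed to be zero, so
     * copying *r out to an untrusted reader cannot leak old data. */
}

int main(void)
{
    struct reply r;

    fill_reply(&r, 42);
    printf("%d %d %d\n", r.cmd, r.status, r.reserved);
    return 0;
}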
26750diff -urNp linux-2.6.32.42/drivers/block/cpqarray.c linux-2.6.32.42/drivers/block/cpqarray.c
26751--- linux-2.6.32.42/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26752+++ linux-2.6.32.42/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26753@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26754 struct scatterlist tmp_sg[SG_MAX];
26755 int i, dir, seg;
26756
26757+ pax_track_stack();
26758+
26759 if (blk_queue_plugged(q))
26760 goto startio;
26761
26762diff -urNp linux-2.6.32.42/drivers/block/DAC960.c linux-2.6.32.42/drivers/block/DAC960.c
26763--- linux-2.6.32.42/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26764+++ linux-2.6.32.42/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26765@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26766 unsigned long flags;
26767 int Channel, TargetID;
26768
26769+ pax_track_stack();
26770+
26771 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26772 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26773 sizeof(DAC960_SCSI_Inquiry_T) +
26774diff -urNp linux-2.6.32.42/drivers/block/nbd.c linux-2.6.32.42/drivers/block/nbd.c
26775--- linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26776+++ linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26777@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26778 struct kvec iov;
26779 sigset_t blocked, oldset;
26780
26781+ pax_track_stack();
26782+
26783 if (unlikely(!sock)) {
26784 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26785 lo->disk->disk_name, (send ? "send" : "recv"));
26786@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26787 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26788 unsigned int cmd, unsigned long arg)
26789 {
26790+ pax_track_stack();
26791+
26792 switch (cmd) {
26793 case NBD_DISCONNECT: {
26794 struct request sreq;
26795diff -urNp linux-2.6.32.42/drivers/block/pktcdvd.c linux-2.6.32.42/drivers/block/pktcdvd.c
26796--- linux-2.6.32.42/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26797+++ linux-2.6.32.42/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26798@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26799 return len;
26800 }
26801
26802-static struct sysfs_ops kobj_pkt_ops = {
26803+static const struct sysfs_ops kobj_pkt_ops = {
26804 .show = kobj_pkt_show,
26805 .store = kobj_pkt_store
26806 };
26807diff -urNp linux-2.6.32.42/drivers/char/agp/frontend.c linux-2.6.32.42/drivers/char/agp/frontend.c
26808--- linux-2.6.32.42/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26809+++ linux-2.6.32.42/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26810@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26811 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26812 return -EFAULT;
26813
26814- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26815+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26816 return -EFAULT;
26817
26818 client = agp_find_client_by_pid(reserve.pid);
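
The agp frontend change above is about which element size the overflow guard uses: seg_count is later multiplied by the size of the kernel-private segment structure, so bounding it against sizeof(struct agp_segment), a different type whose size need not match, can leave room for the real multiplication to wrap; checking against sizeof(struct agp_segment_priv) makes the guard use the same type as the allocation it protects. A generic version of that guard, with invented names:

/* Overflow-safe sizing: the guard must use the element type that is
 * actually allocated, or count * sizeof(elem) can still wrap. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct elem_priv { uint64_t pg_start; uint64_t pg_count; unsigned int prot; unsigned int pad; };

static void *alloc_priv_array(size_t count)
{
    if (count >= SIZE_MAX / sizeof(struct elem_priv))
        return NULL;            /* reject counts that would overflow */
    return malloc(count * sizeof(struct elem_priv));
}

int main(void)
{
    void *ok  = alloc_priv_array(16);
    void *bad = alloc_priv_array(SIZE_MAX / 4);   /* would overflow: rejected */

    printf("small count: %s, huge count: %s\n",
           ok ? "allocated" : "rejected", bad ? "allocated" : "rejected");
    free(ok);
    free(bad);
    return 0;
}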
26819diff -urNp linux-2.6.32.42/drivers/char/briq_panel.c linux-2.6.32.42/drivers/char/briq_panel.c
26820--- linux-2.6.32.42/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26821+++ linux-2.6.32.42/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26822@@ -10,6 +10,7 @@
26823 #include <linux/types.h>
26824 #include <linux/errno.h>
26825 #include <linux/tty.h>
26826+#include <linux/mutex.h>
26827 #include <linux/timer.h>
26828 #include <linux/kernel.h>
26829 #include <linux/wait.h>
26830@@ -36,6 +37,7 @@ static int vfd_is_open;
26831 static unsigned char vfd[40];
26832 static int vfd_cursor;
26833 static unsigned char ledpb, led;
26834+static DEFINE_MUTEX(vfd_mutex);
26835
26836 static void update_vfd(void)
26837 {
26838@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26839 if (!vfd_is_open)
26840 return -EBUSY;
26841
26842+ mutex_lock(&vfd_mutex);
26843 for (;;) {
26844 char c;
26845 if (!indx)
26846 break;
26847- if (get_user(c, buf))
26848+ if (get_user(c, buf)) {
26849+ mutex_unlock(&vfd_mutex);
26850 return -EFAULT;
26851+ }
26852 if (esc) {
26853 set_led(c);
26854 esc = 0;
26855@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26856 buf++;
26857 }
26858 update_vfd();
26859+ mutex_unlock(&vfd_mutex);
26860
26861 return len;
26862 }
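
briq_panel_write() updates the shared vfd[] buffer and cursor, so the hunk above serializes the whole write path with a mutex and is careful to drop the lock on the early -EFAULT return as well as at the normal exit. The same discipline in a small userspace sketch, with a pthread mutex standing in for the kernel mutex:

/* Sketch: every exit path out of the critical section must unlock. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vfd_mutex = PTHREAD_MUTEX_INITIALIZER;
static char vfd[40];
static size_t vfd_cursor;

static int panel_write(const char *buf, size_t len)
{
    pthread_mutex_lock(&vfd_mutex);

    for (size_t i = 0; i < len; i++) {
        if (buf[i] == '\0') {                  /* stand-in for a failed get_user() */
            pthread_mutex_unlock(&vfd_mutex);  /* early return still unlocks */
            return -EFAULT;
        }
        vfd[vfd_cursor] = buf[i];
        vfd_cursor = (vfd_cursor + 1) % sizeof(vfd);
    }

    pthread_mutex_unlock(&vfd_mutex);
    return (int)len;
}

int main(void)
{
    printf("wrote %d\n", panel_write("hello", 5));
    printf("wrote %d\n", panel_write("x\0y", 3));  /* hits the early return */
    return 0;
}
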
26863diff -urNp linux-2.6.32.42/drivers/char/genrtc.c linux-2.6.32.42/drivers/char/genrtc.c
26864--- linux-2.6.32.42/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26865+++ linux-2.6.32.42/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26866@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26867 switch (cmd) {
26868
26869 case RTC_PLL_GET:
26870+ memset(&pll, 0, sizeof(pll));
26871 if (get_rtc_pll(&pll))
26872 return -EINVAL;
26873 else
26874diff -urNp linux-2.6.32.42/drivers/char/hpet.c linux-2.6.32.42/drivers/char/hpet.c
26875--- linux-2.6.32.42/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26876+++ linux-2.6.32.42/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26877@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26878 return 0;
26879 }
26880
26881-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26882+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26883
26884 static int
26885 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26886@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26887 }
26888
26889 static int
26890-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26891+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26892 {
26893 struct hpet_timer __iomem *timer;
26894 struct hpet __iomem *hpet;
26895@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26896 {
26897 struct hpet_info info;
26898
26899+ memset(&info, 0, sizeof(info));
26900+
26901 if (devp->hd_ireqfreq)
26902 info.hi_ireqfreq =
26903 hpet_time_div(hpetp, devp->hd_ireqfreq);
26904- else
26905- info.hi_ireqfreq = 0;
26906 info.hi_flags =
26907 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26908 info.hi_hpet = hpetp->hp_which;
26909diff -urNp linux-2.6.32.42/drivers/char/hvc_beat.c linux-2.6.32.42/drivers/char/hvc_beat.c
26910--- linux-2.6.32.42/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26911+++ linux-2.6.32.42/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26912@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26913 return cnt;
26914 }
26915
26916-static struct hv_ops hvc_beat_get_put_ops = {
26917+static const struct hv_ops hvc_beat_get_put_ops = {
26918 .get_chars = hvc_beat_get_chars,
26919 .put_chars = hvc_beat_put_chars,
26920 };
26921diff -urNp linux-2.6.32.42/drivers/char/hvc_console.c linux-2.6.32.42/drivers/char/hvc_console.c
26922--- linux-2.6.32.42/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26923+++ linux-2.6.32.42/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26924@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26925 * console interfaces but can still be used as a tty device. This has to be
26926 * static because kmalloc will not work during early console init.
26927 */
26928-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26929+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26930 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26931 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26932
26933@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26934 * vty adapters do NOT get an hvc_instantiate() callback since they
26935 * appear after early console init.
26936 */
26937-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26938+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26939 {
26940 struct hvc_struct *hp;
26941
26942@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26943 };
26944
26945 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26946- struct hv_ops *ops, int outbuf_size)
26947+ const struct hv_ops *ops, int outbuf_size)
26948 {
26949 struct hvc_struct *hp;
26950 int i;
26951diff -urNp linux-2.6.32.42/drivers/char/hvc_console.h linux-2.6.32.42/drivers/char/hvc_console.h
26952--- linux-2.6.32.42/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26953+++ linux-2.6.32.42/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26954@@ -55,7 +55,7 @@ struct hvc_struct {
26955 int outbuf_size;
26956 int n_outbuf;
26957 uint32_t vtermno;
26958- struct hv_ops *ops;
26959+ const struct hv_ops *ops;
26960 int irq_requested;
26961 int data;
26962 struct winsize ws;
26963@@ -76,11 +76,11 @@ struct hv_ops {
26964 };
26965
26966 /* Register a vterm and a slot index for use as a console (console_init) */
26967-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26968+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26969
26970 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26971 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26972- struct hv_ops *ops, int outbuf_size);
26973+ const struct hv_ops *ops, int outbuf_size);
26974 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26975 extern int hvc_remove(struct hvc_struct *hp);
26976
26977diff -urNp linux-2.6.32.42/drivers/char/hvc_iseries.c linux-2.6.32.42/drivers/char/hvc_iseries.c
26978--- linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26979+++ linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26980@@ -197,7 +197,7 @@ done:
26981 return sent;
26982 }
26983
26984-static struct hv_ops hvc_get_put_ops = {
26985+static const struct hv_ops hvc_get_put_ops = {
26986 .get_chars = get_chars,
26987 .put_chars = put_chars,
26988 .notifier_add = notifier_add_irq,
26989diff -urNp linux-2.6.32.42/drivers/char/hvc_iucv.c linux-2.6.32.42/drivers/char/hvc_iucv.c
26990--- linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26991+++ linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26992@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26993
26994
26995 /* HVC operations */
26996-static struct hv_ops hvc_iucv_ops = {
26997+static const struct hv_ops hvc_iucv_ops = {
26998 .get_chars = hvc_iucv_get_chars,
26999 .put_chars = hvc_iucv_put_chars,
27000 .notifier_add = hvc_iucv_notifier_add,
27001diff -urNp linux-2.6.32.42/drivers/char/hvc_rtas.c linux-2.6.32.42/drivers/char/hvc_rtas.c
27002--- linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27003+++ linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27004@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27005 return i;
27006 }
27007
27008-static struct hv_ops hvc_rtas_get_put_ops = {
27009+static const struct hv_ops hvc_rtas_get_put_ops = {
27010 .get_chars = hvc_rtas_read_console,
27011 .put_chars = hvc_rtas_write_console,
27012 };
27013diff -urNp linux-2.6.32.42/drivers/char/hvcs.c linux-2.6.32.42/drivers/char/hvcs.c
27014--- linux-2.6.32.42/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27015+++ linux-2.6.32.42/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27016@@ -82,6 +82,7 @@
27017 #include <asm/hvcserver.h>
27018 #include <asm/uaccess.h>
27019 #include <asm/vio.h>
27020+#include <asm/local.h>
27021
27022 /*
27023 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27024@@ -269,7 +270,7 @@ struct hvcs_struct {
27025 unsigned int index;
27026
27027 struct tty_struct *tty;
27028- int open_count;
27029+ local_t open_count;
27030
27031 /*
27032 * Used to tell the driver kernel_thread what operations need to take
27033@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27034
27035 spin_lock_irqsave(&hvcsd->lock, flags);
27036
27037- if (hvcsd->open_count > 0) {
27038+ if (local_read(&hvcsd->open_count) > 0) {
27039 spin_unlock_irqrestore(&hvcsd->lock, flags);
27040 printk(KERN_INFO "HVCS: vterm state unchanged. "
27041 "The hvcs device node is still in use.\n");
27042@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27043 if ((retval = hvcs_partner_connect(hvcsd)))
27044 goto error_release;
27045
27046- hvcsd->open_count = 1;
27047+ local_set(&hvcsd->open_count, 1);
27048 hvcsd->tty = tty;
27049 tty->driver_data = hvcsd;
27050
27051@@ -1169,7 +1170,7 @@ fast_open:
27052
27053 spin_lock_irqsave(&hvcsd->lock, flags);
27054 kref_get(&hvcsd->kref);
27055- hvcsd->open_count++;
27056+ local_inc(&hvcsd->open_count);
27057 hvcsd->todo_mask |= HVCS_SCHED_READ;
27058 spin_unlock_irqrestore(&hvcsd->lock, flags);
27059
27060@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27061 hvcsd = tty->driver_data;
27062
27063 spin_lock_irqsave(&hvcsd->lock, flags);
27064- if (--hvcsd->open_count == 0) {
27065+ if (local_dec_and_test(&hvcsd->open_count)) {
27066
27067 vio_disable_interrupts(hvcsd->vdev);
27068
27069@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27070 free_irq(irq, hvcsd);
27071 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27072 return;
27073- } else if (hvcsd->open_count < 0) {
27074+ } else if (local_read(&hvcsd->open_count) < 0) {
27075 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27076 " is missmanaged.\n",
27077- hvcsd->vdev->unit_address, hvcsd->open_count);
27078+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27079 }
27080
27081 spin_unlock_irqrestore(&hvcsd->lock, flags);
27082@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27083
27084 spin_lock_irqsave(&hvcsd->lock, flags);
27085 /* Preserve this so that we know how many kref refs to put */
27086- temp_open_count = hvcsd->open_count;
27087+ temp_open_count = local_read(&hvcsd->open_count);
27088
27089 /*
27090 * Don't kref put inside the spinlock because the destruction
27091@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27092 hvcsd->tty->driver_data = NULL;
27093 hvcsd->tty = NULL;
27094
27095- hvcsd->open_count = 0;
27096+ local_set(&hvcsd->open_count, 0);
27097
27098 /* This will drop any buffered data on the floor which is OK in a hangup
27099 * scenario. */
27100@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27101 * the middle of a write operation? This is a crummy place to do this
27102 * but we want to keep it all in the spinlock.
27103 */
27104- if (hvcsd->open_count <= 0) {
27105+ if (local_read(&hvcsd->open_count) <= 0) {
27106 spin_unlock_irqrestore(&hvcsd->lock, flags);
27107 return -ENODEV;
27108 }
27109@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27110 {
27111 struct hvcs_struct *hvcsd = tty->driver_data;
27112
27113- if (!hvcsd || hvcsd->open_count <= 0)
27114+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27115 return 0;
27116
27117 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
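
The hvcs hunks convert open_count from a plain int to the kernel's local_t and manipulate it with local_inc(), local_dec_and_test() and local_read(), so the count stays coherent even where it is read outside the spinlock. A rough userspace analogue of the open/close counting using C11 atomics; the kernel type and helpers differ, this only mirrors the counting logic:

/* Analogue of the open/close reference counting with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void dev_open(void)
{
    if (atomic_fetch_add(&open_count, 1) == 0)
        printf("first open: reset device state\n");
}

static void dev_close(void)
{
    /* like local_dec_and_test(): true only for the final close */
    if (atomic_fetch_sub(&open_count, 1) == 1)
        printf("last close: quiesce device\n");
}

int main(void)
{
    dev_open();
    dev_open();
    dev_close();
    dev_close();
    printf("open_count now %d\n", atomic_load(&open_count));
    return 0;
}
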
27118diff -urNp linux-2.6.32.42/drivers/char/hvc_udbg.c linux-2.6.32.42/drivers/char/hvc_udbg.c
27119--- linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27120+++ linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27121@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27122 return i;
27123 }
27124
27125-static struct hv_ops hvc_udbg_ops = {
27126+static const struct hv_ops hvc_udbg_ops = {
27127 .get_chars = hvc_udbg_get,
27128 .put_chars = hvc_udbg_put,
27129 };
27130diff -urNp linux-2.6.32.42/drivers/char/hvc_vio.c linux-2.6.32.42/drivers/char/hvc_vio.c
27131--- linux-2.6.32.42/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27132+++ linux-2.6.32.42/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27133@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27134 return got;
27135 }
27136
27137-static struct hv_ops hvc_get_put_ops = {
27138+static const struct hv_ops hvc_get_put_ops = {
27139 .get_chars = filtered_get_chars,
27140 .put_chars = hvc_put_chars,
27141 .notifier_add = notifier_add_irq,
27142diff -urNp linux-2.6.32.42/drivers/char/hvc_xen.c linux-2.6.32.42/drivers/char/hvc_xen.c
27143--- linux-2.6.32.42/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27144+++ linux-2.6.32.42/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27145@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27146 return recv;
27147 }
27148
27149-static struct hv_ops hvc_ops = {
27150+static const struct hv_ops hvc_ops = {
27151 .get_chars = read_console,
27152 .put_chars = write_console,
27153 .notifier_add = notifier_add_irq,
27154diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c
27155--- linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27156+++ linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27157@@ -414,7 +414,7 @@ struct ipmi_smi {
27158 struct proc_dir_entry *proc_dir;
27159 char proc_dir_name[10];
27160
27161- atomic_t stats[IPMI_NUM_STATS];
27162+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27163
27164 /*
27165 * run_to_completion duplicate of smb_info, smi_info
27166@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27167
27168
27169 #define ipmi_inc_stat(intf, stat) \
27170- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27171+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27172 #define ipmi_get_stat(intf, stat) \
27173- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27174+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27175
27176 static int is_lan_addr(struct ipmi_addr *addr)
27177 {
27178@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27179 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27180 init_waitqueue_head(&intf->waitq);
27181 for (i = 0; i < IPMI_NUM_STATS; i++)
27182- atomic_set(&intf->stats[i], 0);
27183+ atomic_set_unchecked(&intf->stats[i], 0);
27184
27185 intf->proc_dir = NULL;
27186
27187@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27188 struct ipmi_smi_msg smi_msg;
27189 struct ipmi_recv_msg recv_msg;
27190
27191+ pax_track_stack();
27192+
27193 si = (struct ipmi_system_interface_addr *) &addr;
27194 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27195 si->channel = IPMI_BMC_CHANNEL;
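
The IPMI statistics arrays (and several later counters such as EDAC's pci_parity_count and the GIU irq_err_count) are switched from atomic_t to atomic_unchecked_t. Under PaX's refcount protection, atomic_t increments are overflow-checked because they usually back reference counts; counters that are pure statistics opt out through the _unchecked variant so a wrap is not treated as an exploit attempt. A hedged, non-atomic sketch of the split (simplified for illustration, not the PaX definitions; atomicity is omitted for brevity):

/* Illustration only: a checked counter refuses to overflow, an
 * "unchecked" statistics counter is allowed to wrap. */
#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_demo_t;
typedef struct { int counter; } atomic_unchecked_demo_t;

static void atomic_demo_inc(atomic_demo_t *v)
{
    if (v->counter == INT_MAX) {
        /* a real implementation would log or trap here */
        fprintf(stderr, "refcount overflow detected\n");
        return;
    }
    v->counter++;
}

static void atomic_unchecked_demo_inc(atomic_unchecked_demo_t *v)
{
    v->counter++;  /* plain increment, wrapping is acceptable for stats */
}

int main(void)
{
    atomic_demo_t ref = { INT_MAX };
    atomic_unchecked_demo_t stat = { 0 };

    atomic_demo_inc(&ref);            /* caught */
    atomic_unchecked_demo_inc(&stat);
    printf("ref=%d stat=%d\n", ref.counter, stat.counter);
    return 0;
}
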
27196diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c
27197--- linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27198+++ linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27199@@ -277,7 +277,7 @@ struct smi_info {
27200 unsigned char slave_addr;
27201
27202 /* Counters and things for the proc filesystem. */
27203- atomic_t stats[SI_NUM_STATS];
27204+ atomic_unchecked_t stats[SI_NUM_STATS];
27205
27206 struct task_struct *thread;
27207
27208@@ -285,9 +285,9 @@ struct smi_info {
27209 };
27210
27211 #define smi_inc_stat(smi, stat) \
27212- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27213+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27214 #define smi_get_stat(smi, stat) \
27215- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27216+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27217
27218 #define SI_MAX_PARMS 4
27219
27220@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27221 atomic_set(&new_smi->req_events, 0);
27222 new_smi->run_to_completion = 0;
27223 for (i = 0; i < SI_NUM_STATS; i++)
27224- atomic_set(&new_smi->stats[i], 0);
27225+ atomic_set_unchecked(&new_smi->stats[i], 0);
27226
27227 new_smi->interrupt_disabled = 0;
27228 atomic_set(&new_smi->stop_operation, 0);
27229diff -urNp linux-2.6.32.42/drivers/char/istallion.c linux-2.6.32.42/drivers/char/istallion.c
27230--- linux-2.6.32.42/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27231+++ linux-2.6.32.42/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27232@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27233 * re-used for each stats call.
27234 */
27235 static comstats_t stli_comstats;
27236-static combrd_t stli_brdstats;
27237 static struct asystats stli_cdkstats;
27238
27239 /*****************************************************************************/
27240@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27241 {
27242 struct stlibrd *brdp;
27243 unsigned int i;
27244+ combrd_t stli_brdstats;
27245
27246 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27247 return -EFAULT;
27248@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27249 struct stliport stli_dummyport;
27250 struct stliport *portp;
27251
27252+ pax_track_stack();
27253+
27254 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27255 return -EFAULT;
27256 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27257@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27258 struct stlibrd stli_dummybrd;
27259 struct stlibrd *brdp;
27260
27261+ pax_track_stack();
27262+
27263 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27264 return -EFAULT;
27265 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27266diff -urNp linux-2.6.32.42/drivers/char/Kconfig linux-2.6.32.42/drivers/char/Kconfig
27267--- linux-2.6.32.42/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27268+++ linux-2.6.32.42/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27269@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27270
27271 config DEVKMEM
27272 bool "/dev/kmem virtual device support"
27273- default y
27274+ default n
27275+ depends on !GRKERNSEC_KMEM
27276 help
27277 Say Y here if you want to support the /dev/kmem device. The
27278 /dev/kmem device is rarely used, but can be used for certain
27279@@ -1114,6 +1115,7 @@ config DEVPORT
27280 bool
27281 depends on !M68K
27282 depends on ISA || PCI
27283+ depends on !GRKERNSEC_KMEM
27284 default y
27285
27286 source "drivers/s390/char/Kconfig"
27287diff -urNp linux-2.6.32.42/drivers/char/keyboard.c linux-2.6.32.42/drivers/char/keyboard.c
27288--- linux-2.6.32.42/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27289+++ linux-2.6.32.42/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27290@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27291 kbd->kbdmode == VC_MEDIUMRAW) &&
27292 value != KVAL(K_SAK))
27293 return; /* SAK is allowed even in raw mode */
27294+
27295+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27296+ {
27297+ void *func = fn_handler[value];
27298+ if (func == fn_show_state || func == fn_show_ptregs ||
27299+ func == fn_show_mem)
27300+ return;
27301+ }
27302+#endif
27303+
27304 fn_handler[value](vc);
27305 }
27306
27307@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27308 .evbit = { BIT_MASK(EV_SND) },
27309 },
27310
27311- { }, /* Terminating entry */
27312+ { 0 }, /* Terminating entry */
27313 };
27314
27315 MODULE_DEVICE_TABLE(input, kbd_ids);
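
With the GRKERNSEC_PROC options enabled, k_spec() above refuses to run the keyboard handlers that dump task, register and memory state to the console, since those dumps expose exactly the information the /proc restrictions hide. The guard is just a comparison of the looked-up handler against a short deny-list of function pointers; a compact illustration of that shape (handlers and names are made up):

/* Sketch: dispatch through a handler table, but refuse a few entries. */
#include <stdio.h>

typedef void (*key_fn)(void);

static void fn_null(void)       { }
static void fn_enter(void)      { printf("enter\n"); }
static void fn_show_state(void) { printf("dump of all tasks\n"); }
static void fn_show_mem(void)   { printf("dump of memory info\n"); }

static key_fn fn_handler[] = { fn_null, fn_enter, fn_show_state, fn_show_mem };

static void k_spec_demo(unsigned int value, int restricted)
{
    if (value >= sizeof(fn_handler) / sizeof(fn_handler[0]))
        return;

    if (restricted) {
        key_fn func = fn_handler[value];
        /* same idea as the patch: block only the info-dumping handlers */
        if (func == fn_show_state || func == fn_show_mem)
            return;
    }
    fn_handler[value]();
}

int main(void)
{
    k_spec_demo(1, 1);  /* allowed */
    k_spec_demo(2, 1);  /* blocked */
    k_spec_demo(2, 0);  /* allowed when unrestricted */
    return 0;
}
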
27316diff -urNp linux-2.6.32.42/drivers/char/mem.c linux-2.6.32.42/drivers/char/mem.c
27317--- linux-2.6.32.42/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27318+++ linux-2.6.32.42/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27319@@ -18,6 +18,7 @@
27320 #include <linux/raw.h>
27321 #include <linux/tty.h>
27322 #include <linux/capability.h>
27323+#include <linux/security.h>
27324 #include <linux/ptrace.h>
27325 #include <linux/device.h>
27326 #include <linux/highmem.h>
27327@@ -35,6 +36,10 @@
27328 # include <linux/efi.h>
27329 #endif
27330
27331+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27332+extern struct file_operations grsec_fops;
27333+#endif
27334+
27335 static inline unsigned long size_inside_page(unsigned long start,
27336 unsigned long size)
27337 {
27338@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27339
27340 while (cursor < to) {
27341 if (!devmem_is_allowed(pfn)) {
27342+#ifdef CONFIG_GRKERNSEC_KMEM
27343+ gr_handle_mem_readwrite(from, to);
27344+#else
27345 printk(KERN_INFO
27346 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27347 current->comm, from, to);
27348+#endif
27349 return 0;
27350 }
27351 cursor += PAGE_SIZE;
27352@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27353 }
27354 return 1;
27355 }
27356+#elif defined(CONFIG_GRKERNSEC_KMEM)
27357+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27358+{
27359+ return 0;
27360+}
27361 #else
27362 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27363 {
27364@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27365 #endif
27366
27367 while (count > 0) {
27368+ char *temp;
27369+
27370 /*
27371 * Handle first page in case it's not aligned
27372 */
27373@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27374 if (!ptr)
27375 return -EFAULT;
27376
27377- if (copy_to_user(buf, ptr, sz)) {
27378+#ifdef CONFIG_PAX_USERCOPY
27379+ temp = kmalloc(sz, GFP_KERNEL);
27380+ if (!temp) {
27381+ unxlate_dev_mem_ptr(p, ptr);
27382+ return -ENOMEM;
27383+ }
27384+ memcpy(temp, ptr, sz);
27385+#else
27386+ temp = ptr;
27387+#endif
27388+
27389+ if (copy_to_user(buf, temp, sz)) {
27390+
27391+#ifdef CONFIG_PAX_USERCOPY
27392+ kfree(temp);
27393+#endif
27394+
27395 unxlate_dev_mem_ptr(p, ptr);
27396 return -EFAULT;
27397 }
27398
27399+#ifdef CONFIG_PAX_USERCOPY
27400+ kfree(temp);
27401+#endif
27402+
27403 unxlate_dev_mem_ptr(p, ptr);
27404
27405 buf += sz;
27406@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27407 size_t count, loff_t *ppos)
27408 {
27409 unsigned long p = *ppos;
27410- ssize_t low_count, read, sz;
27411+ ssize_t low_count, read, sz, err = 0;
27412 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27413- int err = 0;
27414
27415 read = 0;
27416 if (p < (unsigned long) high_memory) {
27417@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27418 }
27419 #endif
27420 while (low_count > 0) {
27421+ char *temp;
27422+
27423 sz = size_inside_page(p, low_count);
27424
27425 /*
27426@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27427 */
27428 kbuf = xlate_dev_kmem_ptr((char *)p);
27429
27430- if (copy_to_user(buf, kbuf, sz))
27431+#ifdef CONFIG_PAX_USERCOPY
27432+ temp = kmalloc(sz, GFP_KERNEL);
27433+ if (!temp)
27434+ return -ENOMEM;
27435+ memcpy(temp, kbuf, sz);
27436+#else
27437+ temp = kbuf;
27438+#endif
27439+
27440+ err = copy_to_user(buf, temp, sz);
27441+
27442+#ifdef CONFIG_PAX_USERCOPY
27443+ kfree(temp);
27444+#endif
27445+
27446+ if (err)
27447 return -EFAULT;
27448 buf += sz;
27449 p += sz;
27450@@ -889,6 +941,9 @@ static const struct memdev {
27451 #ifdef CONFIG_CRASH_DUMP
27452 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27453 #endif
27454+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27455+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27456+#endif
27457 };
27458
27459 static int memory_open(struct inode *inode, struct file *filp)
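
Under PAX_USERCOPY, read_mem() and read_kmem() no longer copy straight from the translated kernel pointer to user space; they bounce the data through a freshly kmalloc'd buffer so the usercopy checker can validate the source object, and the buffer is freed on every exit path. The structure of that bounce copy, transplanted to a userspace sketch with a heap buffer standing in for the kmalloc'd temp and plain memcpy for copy_to_user():

/* Sketch of the bounce-buffer pattern used in read_mem()/read_kmem(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int bounce_copy(char *dst, const char *src, size_t sz)
{
    char *temp = malloc(sz);   /* the kmalloc'd staging buffer */
    if (!temp)
        return -ENOMEM;

    memcpy(temp, src, sz);     /* pull from the "device" mapping */

    /* copy_to_user(dst, temp, sz) in the kernel; it may fail there */
    memcpy(dst, temp, sz);

    free(temp);                /* freed on every path, as in the patch */
    return 0;
}

int main(void)
{
    const char source[] = "contents of /dev/mem page";
    char out[sizeof(source)];

    if (bounce_copy(out, source, sizeof(source)) == 0)
        printf("copied: %s\n", out);
    return 0;
}
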
27460diff -urNp linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c
27461--- linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27462+++ linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27463@@ -29,6 +29,7 @@
27464 #include <linux/tty_driver.h>
27465 #include <linux/tty_flip.h>
27466 #include <linux/uaccess.h>
27467+#include <asm/local.h>
27468
27469 #include "tty.h"
27470 #include "network.h"
27471@@ -51,7 +52,7 @@ struct ipw_tty {
27472 int tty_type;
27473 struct ipw_network *network;
27474 struct tty_struct *linux_tty;
27475- int open_count;
27476+ local_t open_count;
27477 unsigned int control_lines;
27478 struct mutex ipw_tty_mutex;
27479 int tx_bytes_queued;
27480@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27481 mutex_unlock(&tty->ipw_tty_mutex);
27482 return -ENODEV;
27483 }
27484- if (tty->open_count == 0)
27485+ if (local_read(&tty->open_count) == 0)
27486 tty->tx_bytes_queued = 0;
27487
27488- tty->open_count++;
27489+ local_inc(&tty->open_count);
27490
27491 tty->linux_tty = linux_tty;
27492 linux_tty->driver_data = tty;
27493@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27494
27495 static void do_ipw_close(struct ipw_tty *tty)
27496 {
27497- tty->open_count--;
27498-
27499- if (tty->open_count == 0) {
27500+ if (local_dec_return(&tty->open_count) == 0) {
27501 struct tty_struct *linux_tty = tty->linux_tty;
27502
27503 if (linux_tty != NULL) {
27504@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27505 return;
27506
27507 mutex_lock(&tty->ipw_tty_mutex);
27508- if (tty->open_count == 0) {
27509+ if (local_read(&tty->open_count) == 0) {
27510 mutex_unlock(&tty->ipw_tty_mutex);
27511 return;
27512 }
27513@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27514 return;
27515 }
27516
27517- if (!tty->open_count) {
27518+ if (!local_read(&tty->open_count)) {
27519 mutex_unlock(&tty->ipw_tty_mutex);
27520 return;
27521 }
27522@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
27523 return -ENODEV;
27524
27525 mutex_lock(&tty->ipw_tty_mutex);
27526- if (!tty->open_count) {
27527+ if (!local_read(&tty->open_count)) {
27528 mutex_unlock(&tty->ipw_tty_mutex);
27529 return -EINVAL;
27530 }
27531@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
27532 if (!tty)
27533 return -ENODEV;
27534
27535- if (!tty->open_count)
27536+ if (!local_read(&tty->open_count))
27537 return -EINVAL;
27538
27539 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
27540@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
27541 if (!tty)
27542 return 0;
27543
27544- if (!tty->open_count)
27545+ if (!local_read(&tty->open_count))
27546 return 0;
27547
27548 return tty->tx_bytes_queued;
27549@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
27550 if (!tty)
27551 return -ENODEV;
27552
27553- if (!tty->open_count)
27554+ if (!local_read(&tty->open_count))
27555 return -EINVAL;
27556
27557 return get_control_lines(tty);
27558@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
27559 if (!tty)
27560 return -ENODEV;
27561
27562- if (!tty->open_count)
27563+ if (!local_read(&tty->open_count))
27564 return -EINVAL;
27565
27566 return set_control_lines(tty, set, clear);
27567@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
27568 if (!tty)
27569 return -ENODEV;
27570
27571- if (!tty->open_count)
27572+ if (!local_read(&tty->open_count))
27573 return -EINVAL;
27574
27575 /* FIXME: Exactly how is the tty object locked here .. */
27576@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
27577 against a parallel ioctl etc */
27578 mutex_lock(&ttyj->ipw_tty_mutex);
27579 }
27580- while (ttyj->open_count)
27581+ while (local_read(&ttyj->open_count))
27582 do_ipw_close(ttyj);
27583 ipwireless_disassociate_network_ttys(network,
27584 ttyj->channel_idx);
27585diff -urNp linux-2.6.32.42/drivers/char/pty.c linux-2.6.32.42/drivers/char/pty.c
27586--- linux-2.6.32.42/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
27587+++ linux-2.6.32.42/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
27588@@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
27589 return ret;
27590 }
27591
27592-static struct file_operations ptmx_fops;
27593+static const struct file_operations ptmx_fops = {
27594+ .llseek = no_llseek,
27595+ .read = tty_read,
27596+ .write = tty_write,
27597+ .poll = tty_poll,
27598+ .unlocked_ioctl = tty_ioctl,
27599+ .compat_ioctl = tty_compat_ioctl,
27600+ .open = ptmx_open,
27601+ .release = tty_release,
27602+ .fasync = tty_fasync,
27603+};
27604+
27605
27606 static void __init unix98_pty_init(void)
27607 {
27608@@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
27609 register_sysctl_table(pty_root_table);
27610
27611 /* Now create the /dev/ptmx special device */
27612- tty_default_fops(&ptmx_fops);
27613- ptmx_fops.open = ptmx_open;
27614-
27615 cdev_init(&ptmx_cdev, &ptmx_fops);
27616 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
27617 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
27618diff -urNp linux-2.6.32.42/drivers/char/random.c linux-2.6.32.42/drivers/char/random.c
27619--- linux-2.6.32.42/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
27620+++ linux-2.6.32.42/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
27621@@ -254,8 +254,13 @@
27622 /*
27623 * Configuration information
27624 */
27625+#ifdef CONFIG_GRKERNSEC_RANDNET
27626+#define INPUT_POOL_WORDS 512
27627+#define OUTPUT_POOL_WORDS 128
27628+#else
27629 #define INPUT_POOL_WORDS 128
27630 #define OUTPUT_POOL_WORDS 32
27631+#endif
27632 #define SEC_XFER_SIZE 512
27633
27634 /*
27635@@ -292,10 +297,17 @@ static struct poolinfo {
27636 int poolwords;
27637 int tap1, tap2, tap3, tap4, tap5;
27638 } poolinfo_table[] = {
27639+#ifdef CONFIG_GRKERNSEC_RANDNET
27640+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27641+ { 512, 411, 308, 208, 104, 1 },
27642+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27643+ { 128, 103, 76, 51, 25, 1 },
27644+#else
27645 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27646 { 128, 103, 76, 51, 25, 1 },
27647 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27648 { 32, 26, 20, 14, 7, 1 },
27649+#endif
27650 #if 0
27651 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27652 { 2048, 1638, 1231, 819, 411, 1 },
27653@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27654 #include <linux/sysctl.h>
27655
27656 static int min_read_thresh = 8, min_write_thresh;
27657-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27658+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27659 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27660 static char sysctl_bootid[16];
27661
27662diff -urNp linux-2.6.32.42/drivers/char/rocket.c linux-2.6.32.42/drivers/char/rocket.c
27663--- linux-2.6.32.42/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27664+++ linux-2.6.32.42/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27665@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27666 struct rocket_ports tmp;
27667 int board;
27668
27669+ pax_track_stack();
27670+
27671 if (!retports)
27672 return -EFAULT;
27673 memset(&tmp, 0, sizeof (tmp));
27674diff -urNp linux-2.6.32.42/drivers/char/sonypi.c linux-2.6.32.42/drivers/char/sonypi.c
27675--- linux-2.6.32.42/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27676+++ linux-2.6.32.42/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27677@@ -55,6 +55,7 @@
27678 #include <asm/uaccess.h>
27679 #include <asm/io.h>
27680 #include <asm/system.h>
27681+#include <asm/local.h>
27682
27683 #include <linux/sonypi.h>
27684
27685@@ -491,7 +492,7 @@ static struct sonypi_device {
27686 spinlock_t fifo_lock;
27687 wait_queue_head_t fifo_proc_list;
27688 struct fasync_struct *fifo_async;
27689- int open_count;
27690+ local_t open_count;
27691 int model;
27692 struct input_dev *input_jog_dev;
27693 struct input_dev *input_key_dev;
27694@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27695 static int sonypi_misc_release(struct inode *inode, struct file *file)
27696 {
27697 mutex_lock(&sonypi_device.lock);
27698- sonypi_device.open_count--;
27699+ local_dec(&sonypi_device.open_count);
27700 mutex_unlock(&sonypi_device.lock);
27701 return 0;
27702 }
27703@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27704 lock_kernel();
27705 mutex_lock(&sonypi_device.lock);
27706 /* Flush input queue on first open */
27707- if (!sonypi_device.open_count)
27708+ if (!local_read(&sonypi_device.open_count))
27709 kfifo_reset(sonypi_device.fifo);
27710- sonypi_device.open_count++;
27711+ local_inc(&sonypi_device.open_count);
27712 mutex_unlock(&sonypi_device.lock);
27713 unlock_kernel();
27714 return 0;
27715diff -urNp linux-2.6.32.42/drivers/char/stallion.c linux-2.6.32.42/drivers/char/stallion.c
27716--- linux-2.6.32.42/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27717+++ linux-2.6.32.42/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27718@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27719 struct stlport stl_dummyport;
27720 struct stlport *portp;
27721
27722+ pax_track_stack();
27723+
27724 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27725 return -EFAULT;
27726 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27727diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm_bios.c linux-2.6.32.42/drivers/char/tpm/tpm_bios.c
27728--- linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27729+++ linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27730@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27731 event = addr;
27732
27733 if ((event->event_type == 0 && event->event_size == 0) ||
27734- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27735+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27736 return NULL;
27737
27738 return addr;
27739@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27740 return NULL;
27741
27742 if ((event->event_type == 0 && event->event_size == 0) ||
27743- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27744+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27745 return NULL;
27746
27747 (*pos)++;
27748@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27749 int i;
27750
27751 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27752- seq_putc(m, data[i]);
27753+ if (!seq_putc(m, data[i]))
27754+ return -EFAULT;
27755
27756 return 0;
27757 }
27758@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27759 log->bios_event_log_end = log->bios_event_log + len;
27760
27761 virt = acpi_os_map_memory(start, len);
27762+ if (!virt) {
27763+ kfree(log->bios_event_log);
27764+ log->bios_event_log = NULL;
27765+ return -EFAULT;
27766+ }
27767
27768 memcpy(log->bios_event_log, virt, len);
27769
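
The two tpm_bios hunks rewrite the event-table bounds check. The old form, addr + sizeof(struct tcpa_event) + event->event_size >= limit, can wrap when event_size is huge, making the sum look small and the check pass; the new form compares event_size against the remaining room, limit - addr - sizeof(...), which cannot overflow once addr plus the fixed header is known to lie within the buffer. The same rearrangement with plain unsigned integers, as a sketch of the principle rather than the TPM structures:

/* Sketch: checking a length by addition can overflow; checking the same
 * length by subtraction against the remaining room cannot. */
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 32u

static int in_bounds_add(uint32_t off, uint32_t event_size, uint32_t limit)
{
    /* unsafe shape: off + HDR_SIZE + event_size may wrap past zero */
    return off + HDR_SIZE + event_size < limit;
}

static int in_bounds_sub(uint32_t off, uint32_t event_size, uint32_t limit)
{
    /* safe shape (assumes off + HDR_SIZE <= limit was already checked) */
    return event_size < limit - off - HDR_SIZE;
}

int main(void)
{
    uint32_t off = 100, limit = 4096;
    uint32_t evil = UINT32_MAX - 64;  /* attacker-chosen huge size */

    printf("additive check  : %d (wrongly accepts)\n",
           in_bounds_add(off, evil, limit));
    printf("subtractive check: %d (rejects)\n",
           in_bounds_sub(off, evil, limit));
    return 0;
}
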
27770diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm.c linux-2.6.32.42/drivers/char/tpm/tpm.c
27771--- linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27772+++ linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27773@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27774 chip->vendor.req_complete_val)
27775 goto out_recv;
27776
27777- if ((status == chip->vendor.req_canceled)) {
27778+ if (status == chip->vendor.req_canceled) {
27779 dev_err(chip->dev, "Operation Canceled\n");
27780 rc = -ECANCELED;
27781 goto out;
27782@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27783
27784 struct tpm_chip *chip = dev_get_drvdata(dev);
27785
27786+ pax_track_stack();
27787+
27788 tpm_cmd.header.in = tpm_readpubek_header;
27789 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27790 "attempting to read the PUBEK");
27791diff -urNp linux-2.6.32.42/drivers/char/tty_io.c linux-2.6.32.42/drivers/char/tty_io.c
27792--- linux-2.6.32.42/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27793+++ linux-2.6.32.42/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27794@@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27795 DEFINE_MUTEX(tty_mutex);
27796 EXPORT_SYMBOL(tty_mutex);
27797
27798-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27799-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27800 ssize_t redirected_tty_write(struct file *, const char __user *,
27801 size_t, loff_t *);
27802-static unsigned int tty_poll(struct file *, poll_table *);
27803 static int tty_open(struct inode *, struct file *);
27804-static int tty_release(struct inode *, struct file *);
27805 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27806-#ifdef CONFIG_COMPAT
27807-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27808- unsigned long arg);
27809-#else
27810-#define tty_compat_ioctl NULL
27811-#endif
27812-static int tty_fasync(int fd, struct file *filp, int on);
27813 static void release_tty(struct tty_struct *tty, int idx);
27814 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27815 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27816@@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27817 * read calls may be outstanding in parallel.
27818 */
27819
27820-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27821+ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27822 loff_t *ppos)
27823 {
27824 int i;
27825@@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27826 return i;
27827 }
27828
27829+EXPORT_SYMBOL(tty_read);
27830+
27831 void tty_write_unlock(struct tty_struct *tty)
27832 {
27833 mutex_unlock(&tty->atomic_write_lock);
27834@@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27835 * write method will not be invoked in parallel for each device.
27836 */
27837
27838-static ssize_t tty_write(struct file *file, const char __user *buf,
27839+ssize_t tty_write(struct file *file, const char __user *buf,
27840 size_t count, loff_t *ppos)
27841 {
27842 struct tty_struct *tty;
27843@@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27844 return ret;
27845 }
27846
27847+EXPORT_SYMBOL(tty_write);
27848+
27849 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27850 size_t count, loff_t *ppos)
27851 {
27852@@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27853 * Takes bkl. See tty_release_dev
27854 */
27855
27856-static int tty_release(struct inode *inode, struct file *filp)
27857+int tty_release(struct inode *inode, struct file *filp)
27858 {
27859 lock_kernel();
27860 tty_release_dev(filp);
27861@@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27862 return 0;
27863 }
27864
27865+EXPORT_SYMBOL(tty_release);
27866+
27867 /**
27868 * tty_poll - check tty status
27869 * @filp: file being polled
27870@@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27871 * may be re-entered freely by other callers.
27872 */
27873
27874-static unsigned int tty_poll(struct file *filp, poll_table *wait)
27875+unsigned int tty_poll(struct file *filp, poll_table *wait)
27876 {
27877 struct tty_struct *tty;
27878 struct tty_ldisc *ld;
27879@@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27880 return ret;
27881 }
27882
27883-static int tty_fasync(int fd, struct file *filp, int on)
27884+EXPORT_SYMBOL(tty_poll);
27885+
27886+int tty_fasync(int fd, struct file *filp, int on)
27887 {
27888 struct tty_struct *tty;
27889 unsigned long flags;
27890@@ -1948,6 +1945,8 @@ out:
27891 return retval;
27892 }
27893
27894+EXPORT_SYMBOL(tty_fasync);
27895+
27896 /**
27897 * tiocsti - fake input character
27898 * @tty: tty to fake input into
27899@@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27900 return retval;
27901 }
27902
27903+EXPORT_SYMBOL(tty_ioctl);
27904+
27905 #ifdef CONFIG_COMPAT
27906-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27907+long tty_compat_ioctl(struct file *file, unsigned int cmd,
27908 unsigned long arg)
27909 {
27910 struct inode *inode = file->f_dentry->d_inode;
27911@@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27912
27913 return retval;
27914 }
27915+
27916+EXPORT_SYMBOL(tty_compat_ioctl);
27917 #endif
27918
27919 /*
27920@@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27921 }
27922 EXPORT_SYMBOL_GPL(get_current_tty);
27923
27924-void tty_default_fops(struct file_operations *fops)
27925-{
27926- *fops = tty_fops;
27927-}
27928-
27929 /*
27930 * Initialize the console device. This is called *early*, so
27931 * we can't necessarily depend on lots of kernel help here.
27932diff -urNp linux-2.6.32.42/drivers/char/tty_ldisc.c linux-2.6.32.42/drivers/char/tty_ldisc.c
27933--- linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27934+++ linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27935@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27936 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27937 struct tty_ldisc_ops *ldo = ld->ops;
27938
27939- ldo->refcount--;
27940+ atomic_dec(&ldo->refcount);
27941 module_put(ldo->owner);
27942 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27943
27944@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27945 spin_lock_irqsave(&tty_ldisc_lock, flags);
27946 tty_ldiscs[disc] = new_ldisc;
27947 new_ldisc->num = disc;
27948- new_ldisc->refcount = 0;
27949+ atomic_set(&new_ldisc->refcount, 0);
27950 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27951
27952 return ret;
27953@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27954 return -EINVAL;
27955
27956 spin_lock_irqsave(&tty_ldisc_lock, flags);
27957- if (tty_ldiscs[disc]->refcount)
27958+ if (atomic_read(&tty_ldiscs[disc]->refcount))
27959 ret = -EBUSY;
27960 else
27961 tty_ldiscs[disc] = NULL;
27962@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27963 if (ldops) {
27964 ret = ERR_PTR(-EAGAIN);
27965 if (try_module_get(ldops->owner)) {
27966- ldops->refcount++;
27967+ atomic_inc(&ldops->refcount);
27968 ret = ldops;
27969 }
27970 }
27971@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27972 unsigned long flags;
27973
27974 spin_lock_irqsave(&tty_ldisc_lock, flags);
27975- ldops->refcount--;
27976+ atomic_dec(&ldops->refcount);
27977 module_put(ldops->owner);
27978 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27979 }
27980diff -urNp linux-2.6.32.42/drivers/char/virtio_console.c linux-2.6.32.42/drivers/char/virtio_console.c
27981--- linux-2.6.32.42/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27982+++ linux-2.6.32.42/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27983@@ -44,6 +44,7 @@ static unsigned int in_len;
27984 static char *in, *inbuf;
27985
27986 /* The operations for our console. */
27987+/* cannot be const */
27988 static struct hv_ops virtio_cons;
27989
27990 /* The hvc device */
27991diff -urNp linux-2.6.32.42/drivers/char/vt.c linux-2.6.32.42/drivers/char/vt.c
27992--- linux-2.6.32.42/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27993+++ linux-2.6.32.42/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27994@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27995
27996 static void notify_write(struct vc_data *vc, unsigned int unicode)
27997 {
27998- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27999+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28000 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28001 }
28002
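
The vt.c change looks cosmetic but addresses a real initializer pitfall: { .vc = vc, unicode = unicode } mixes a designated initializer with a positional one, so the second value is parsed as an assignment expression and lands in whichever member follows vc; whether that happens to be the intended field depends entirely on the struct layout. The patched form names the target member explicitly with .c. A small demonstration of the pitfall (the struct here is invented to make the misplacement visible):

/* Sketch of the initializer pitfall fixed in notify_write(). */
#include <stdio.h>

struct param {
    int vc;
    int other;  /* the member positional initialization hits next */
    int c;      /* the member the code intended to set */
};

int main(void)
{
    int unicode = 0x2603;

    /* looks like a designator, but "unicode = unicode" is an assignment
     * expression used positionally, so it initializes .other */
    struct param sloppy = { .vc = 1, unicode = unicode };

    /* explicit designator, as in the patched code */
    struct param fixed = { .vc = 1, .c = unicode };

    printf("sloppy: other=0x%x c=0x%x\n",
           (unsigned)sloppy.other, (unsigned)sloppy.c);
    printf("fixed : other=0x%x c=0x%x\n",
           (unsigned)fixed.other, (unsigned)fixed.c);
    return 0;
}
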
28003diff -urNp linux-2.6.32.42/drivers/char/vt_ioctl.c linux-2.6.32.42/drivers/char/vt_ioctl.c
28004--- linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28005+++ linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28006@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28007 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28008 return -EFAULT;
28009
28010- if (!capable(CAP_SYS_TTY_CONFIG))
28011- perm = 0;
28012-
28013 switch (cmd) {
28014 case KDGKBENT:
28015 key_map = key_maps[s];
28016@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28017 val = (i ? K_HOLE : K_NOSUCHMAP);
28018 return put_user(val, &user_kbe->kb_value);
28019 case KDSKBENT:
28020+ if (!capable(CAP_SYS_TTY_CONFIG))
28021+ perm = 0;
28022+
28023 if (!perm)
28024 return -EPERM;
28025+
28026 if (!i && v == K_NOSUCHMAP) {
28027 /* deallocate map */
28028 key_map = key_maps[s];
28029@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28030 int i, j, k;
28031 int ret;
28032
28033- if (!capable(CAP_SYS_TTY_CONFIG))
28034- perm = 0;
28035-
28036 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28037 if (!kbs) {
28038 ret = -ENOMEM;
28039@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28040 kfree(kbs);
28041 return ((p && *p) ? -EOVERFLOW : 0);
28042 case KDSKBSENT:
28043+ if (!capable(CAP_SYS_TTY_CONFIG))
28044+ perm = 0;
28045+
28046 if (!perm) {
28047 ret = -EPERM;
28048 goto reterr;
28049diff -urNp linux-2.6.32.42/drivers/cpufreq/cpufreq.c linux-2.6.32.42/drivers/cpufreq/cpufreq.c
28050--- linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28051+++ linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28052@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28053 complete(&policy->kobj_unregister);
28054 }
28055
28056-static struct sysfs_ops sysfs_ops = {
28057+static const struct sysfs_ops sysfs_ops = {
28058 .show = show,
28059 .store = store,
28060 };
28061diff -urNp linux-2.6.32.42/drivers/cpuidle/sysfs.c linux-2.6.32.42/drivers/cpuidle/sysfs.c
28062--- linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28063+++ linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28064@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28065 return ret;
28066 }
28067
28068-static struct sysfs_ops cpuidle_sysfs_ops = {
28069+static const struct sysfs_ops cpuidle_sysfs_ops = {
28070 .show = cpuidle_show,
28071 .store = cpuidle_store,
28072 };
28073@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28074 return ret;
28075 }
28076
28077-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28078+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28079 .show = cpuidle_state_show,
28080 };
28081
28082@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28083 .release = cpuidle_state_sysfs_release,
28084 };
28085
28086-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28087+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28088 {
28089 kobject_put(&device->kobjs[i]->kobj);
28090 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28091diff -urNp linux-2.6.32.42/drivers/crypto/hifn_795x.c linux-2.6.32.42/drivers/crypto/hifn_795x.c
28092--- linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28093+++ linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28094@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28095 0xCA, 0x34, 0x2B, 0x2E};
28096 struct scatterlist sg;
28097
28098+ pax_track_stack();
28099+
28100 memset(src, 0, sizeof(src));
28101 memset(ctx.key, 0, sizeof(ctx.key));
28102
28103diff -urNp linux-2.6.32.42/drivers/crypto/padlock-aes.c linux-2.6.32.42/drivers/crypto/padlock-aes.c
28104--- linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28105+++ linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28106@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28107 struct crypto_aes_ctx gen_aes;
28108 int cpu;
28109
28110+ pax_track_stack();
28111+
28112 if (key_len % 8) {
28113 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28114 return -EINVAL;
28115diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.c linux-2.6.32.42/drivers/dma/ioat/dma.c
28116--- linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28117+++ linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28118@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28119 return entry->show(&chan->common, page);
28120 }
28121
28122-struct sysfs_ops ioat_sysfs_ops = {
28123+const struct sysfs_ops ioat_sysfs_ops = {
28124 .show = ioat_attr_show,
28125 };
28126
28127diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.h linux-2.6.32.42/drivers/dma/ioat/dma.h
28128--- linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28129+++ linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28130@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28131 unsigned long *phys_complete);
28132 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28133 void ioat_kobject_del(struct ioatdma_device *device);
28134-extern struct sysfs_ops ioat_sysfs_ops;
28135+extern const struct sysfs_ops ioat_sysfs_ops;
28136 extern struct ioat_sysfs_entry ioat_version_attr;
28137 extern struct ioat_sysfs_entry ioat_cap_attr;
28138 #endif /* IOATDMA_H */
28139diff -urNp linux-2.6.32.42/drivers/edac/edac_device_sysfs.c linux-2.6.32.42/drivers/edac/edac_device_sysfs.c
28140--- linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28141+++ linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28142@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28143 }
28144
28145 /* edac_dev file operations for an 'ctl_info' */
28146-static struct sysfs_ops device_ctl_info_ops = {
28147+static const struct sysfs_ops device_ctl_info_ops = {
28148 .show = edac_dev_ctl_info_show,
28149 .store = edac_dev_ctl_info_store
28150 };
28151@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28152 }
28153
28154 /* edac_dev file operations for an 'instance' */
28155-static struct sysfs_ops device_instance_ops = {
28156+static const struct sysfs_ops device_instance_ops = {
28157 .show = edac_dev_instance_show,
28158 .store = edac_dev_instance_store
28159 };
28160@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28161 }
28162
28163 /* edac_dev file operations for a 'block' */
28164-static struct sysfs_ops device_block_ops = {
28165+static const struct sysfs_ops device_block_ops = {
28166 .show = edac_dev_block_show,
28167 .store = edac_dev_block_store
28168 };
28169diff -urNp linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c
28170--- linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28171+++ linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28172@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28173 return -EIO;
28174 }
28175
28176-static struct sysfs_ops csrowfs_ops = {
28177+static const struct sysfs_ops csrowfs_ops = {
28178 .show = csrowdev_show,
28179 .store = csrowdev_store
28180 };
28181@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28182 }
28183
28184 /* Intermediate show/store table */
28185-static struct sysfs_ops mci_ops = {
28186+static const struct sysfs_ops mci_ops = {
28187 .show = mcidev_show,
28188 .store = mcidev_store
28189 };
28190diff -urNp linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c
28191--- linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28192+++ linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28193@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28194 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28195 static int edac_pci_poll_msec = 1000; /* one second workq period */
28196
28197-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28198-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28199+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28200+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28201
28202 static struct kobject *edac_pci_top_main_kobj;
28203 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28204@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28205 }
28206
28207 /* fs_ops table */
28208-static struct sysfs_ops pci_instance_ops = {
28209+static const struct sysfs_ops pci_instance_ops = {
28210 .show = edac_pci_instance_show,
28211 .store = edac_pci_instance_store
28212 };
28213@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28214 return -EIO;
28215 }
28216
28217-static struct sysfs_ops edac_pci_sysfs_ops = {
28218+static const struct sysfs_ops edac_pci_sysfs_ops = {
28219 .show = edac_pci_dev_show,
28220 .store = edac_pci_dev_store
28221 };
28222@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28223 edac_printk(KERN_CRIT, EDAC_PCI,
28224 "Signaled System Error on %s\n",
28225 pci_name(dev));
28226- atomic_inc(&pci_nonparity_count);
28227+ atomic_inc_unchecked(&pci_nonparity_count);
28228 }
28229
28230 if (status & (PCI_STATUS_PARITY)) {
28231@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28232 "Master Data Parity Error on %s\n",
28233 pci_name(dev));
28234
28235- atomic_inc(&pci_parity_count);
28236+ atomic_inc_unchecked(&pci_parity_count);
28237 }
28238
28239 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28240@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28241 "Detected Parity Error on %s\n",
28242 pci_name(dev));
28243
28244- atomic_inc(&pci_parity_count);
28245+ atomic_inc_unchecked(&pci_parity_count);
28246 }
28247 }
28248
28249@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28250 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28251 "Signaled System Error on %s\n",
28252 pci_name(dev));
28253- atomic_inc(&pci_nonparity_count);
28254+ atomic_inc_unchecked(&pci_nonparity_count);
28255 }
28256
28257 if (status & (PCI_STATUS_PARITY)) {
28258@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28259 "Master Data Parity Error on "
28260 "%s\n", pci_name(dev));
28261
28262- atomic_inc(&pci_parity_count);
28263+ atomic_inc_unchecked(&pci_parity_count);
28264 }
28265
28266 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28267@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28268 "Detected Parity Error on %s\n",
28269 pci_name(dev));
28270
28271- atomic_inc(&pci_parity_count);
28272+ atomic_inc_unchecked(&pci_parity_count);
28273 }
28274 }
28275 }
28276@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28277 if (!check_pci_errors)
28278 return;
28279
28280- before_count = atomic_read(&pci_parity_count);
28281+ before_count = atomic_read_unchecked(&pci_parity_count);
28282
28283 /* scan all PCI devices looking for a Parity Error on devices and
28284 * bridges.
28285@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28286 /* Only if operator has selected panic on PCI Error */
28287 if (edac_pci_get_panic_on_pe()) {
28288 /* If the count is different 'after' from 'before' */
28289- if (before_count != atomic_read(&pci_parity_count))
28290+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28291 panic("EDAC: PCI Parity Error");
28292 }
28293 }
28294diff -urNp linux-2.6.32.42/drivers/firewire/core-cdev.c linux-2.6.32.42/drivers/firewire/core-cdev.c
28295--- linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28296+++ linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28297@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28298 int ret;
28299
28300 if ((request->channels == 0 && request->bandwidth == 0) ||
28301- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28302- request->bandwidth < 0)
28303+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28304 return -EINVAL;
28305
28306 r = kmalloc(sizeof(*r), GFP_KERNEL);
28307diff -urNp linux-2.6.32.42/drivers/firewire/core-transaction.c linux-2.6.32.42/drivers/firewire/core-transaction.c
28308--- linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28309+++ linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28310@@ -36,6 +36,7 @@
28311 #include <linux/string.h>
28312 #include <linux/timer.h>
28313 #include <linux/types.h>
28314+#include <linux/sched.h>
28315
28316 #include <asm/byteorder.h>
28317
28318@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28319 struct transaction_callback_data d;
28320 struct fw_transaction t;
28321
28322+ pax_track_stack();
28323+
28324 init_completion(&d.done);
28325 d.payload = payload;
28326 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28327diff -urNp linux-2.6.32.42/drivers/firmware/dmi_scan.c linux-2.6.32.42/drivers/firmware/dmi_scan.c
28328--- linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28329+++ linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28330@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28331 }
28332 }
28333 else {
28334- /*
28335- * no iounmap() for that ioremap(); it would be a no-op, but
28336- * it's so early in setup that sucker gets confused into doing
28337- * what it shouldn't if we actually call it.
28338- */
28339 p = dmi_ioremap(0xF0000, 0x10000);
28340 if (p == NULL)
28341 goto error;
28342diff -urNp linux-2.6.32.42/drivers/firmware/edd.c linux-2.6.32.42/drivers/firmware/edd.c
28343--- linux-2.6.32.42/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28344+++ linux-2.6.32.42/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28345@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28346 return ret;
28347 }
28348
28349-static struct sysfs_ops edd_attr_ops = {
28350+static const struct sysfs_ops edd_attr_ops = {
28351 .show = edd_attr_show,
28352 };
28353
28354diff -urNp linux-2.6.32.42/drivers/firmware/efivars.c linux-2.6.32.42/drivers/firmware/efivars.c
28355--- linux-2.6.32.42/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28356+++ linux-2.6.32.42/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28357@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28358 return ret;
28359 }
28360
28361-static struct sysfs_ops efivar_attr_ops = {
28362+static const struct sysfs_ops efivar_attr_ops = {
28363 .show = efivar_attr_show,
28364 .store = efivar_attr_store,
28365 };
28366diff -urNp linux-2.6.32.42/drivers/firmware/iscsi_ibft.c linux-2.6.32.42/drivers/firmware/iscsi_ibft.c
28367--- linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28368+++ linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28369@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28370 return ret;
28371 }
28372
28373-static struct sysfs_ops ibft_attr_ops = {
28374+static const struct sysfs_ops ibft_attr_ops = {
28375 .show = ibft_show_attribute,
28376 };
28377
28378diff -urNp linux-2.6.32.42/drivers/firmware/memmap.c linux-2.6.32.42/drivers/firmware/memmap.c
28379--- linux-2.6.32.42/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28380+++ linux-2.6.32.42/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28381@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28382 NULL
28383 };
28384
28385-static struct sysfs_ops memmap_attr_ops = {
28386+static const struct sysfs_ops memmap_attr_ops = {
28387 .show = memmap_attr_show,
28388 };
28389
28390diff -urNp linux-2.6.32.42/drivers/gpio/vr41xx_giu.c linux-2.6.32.42/drivers/gpio/vr41xx_giu.c
28391--- linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28392+++ linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28393@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28394 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28395 maskl, pendl, maskh, pendh);
28396
28397- atomic_inc(&irq_err_count);
28398+ atomic_inc_unchecked(&irq_err_count);
28399
28400 return -EINVAL;
28401 }
28402diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c
28403--- linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28404+++ linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28405@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28406 struct drm_crtc *tmp;
28407 int crtc_mask = 1;
28408
28409- WARN(!crtc, "checking null crtc?");
28410+ BUG_ON(!crtc);
28411
28412 dev = crtc->dev;
28413
28414@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28415
28416 adjusted_mode = drm_mode_duplicate(dev, mode);
28417
28418+ pax_track_stack();
28419+
28420 crtc->enabled = drm_helper_crtc_in_use(crtc);
28421
28422 if (!crtc->enabled)
28423diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_drv.c linux-2.6.32.42/drivers/gpu/drm/drm_drv.c
28424--- linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28425+++ linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28426@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28427 char *kdata = NULL;
28428
28429 atomic_inc(&dev->ioctl_count);
28430- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28431+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28432 ++file_priv->ioctl_count;
28433
28434 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28435diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_fops.c linux-2.6.32.42/drivers/gpu/drm/drm_fops.c
28436--- linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28437+++ linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28438@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28439 }
28440
28441 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28442- atomic_set(&dev->counts[i], 0);
28443+ atomic_set_unchecked(&dev->counts[i], 0);
28444
28445 dev->sigdata.lock = NULL;
28446
28447@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28448
28449 retcode = drm_open_helper(inode, filp, dev);
28450 if (!retcode) {
28451- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28452+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28453 spin_lock(&dev->count_lock);
28454- if (!dev->open_count++) {
28455+ if (local_inc_return(&dev->open_count) == 1) {
28456 spin_unlock(&dev->count_lock);
28457 retcode = drm_setup(dev);
28458 goto out;
28459@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28460
28461 lock_kernel();
28462
28463- DRM_DEBUG("open_count = %d\n", dev->open_count);
28464+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28465
28466 if (dev->driver->preclose)
28467 dev->driver->preclose(dev, file_priv);
28468@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28469 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28470 task_pid_nr(current),
28471 (long)old_encode_dev(file_priv->minor->device),
28472- dev->open_count);
28473+ local_read(&dev->open_count));
28474
28475 /* if the master has gone away we can't do anything with the lock */
28476 if (file_priv->minor->master)
28477@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28478 * End inline drm_release
28479 */
28480
28481- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28482+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28483 spin_lock(&dev->count_lock);
28484- if (!--dev->open_count) {
28485+ if (local_dec_and_test(&dev->open_count)) {
28486 if (atomic_read(&dev->ioctl_count)) {
28487 DRM_ERROR("Device busy: %d\n",
28488 atomic_read(&dev->ioctl_count));
28489diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_gem.c linux-2.6.32.42/drivers/gpu/drm/drm_gem.c
28490--- linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28491+++ linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28492@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28493 spin_lock_init(&dev->object_name_lock);
28494 idr_init(&dev->object_name_idr);
28495 atomic_set(&dev->object_count, 0);
28496- atomic_set(&dev->object_memory, 0);
28497+ atomic_set_unchecked(&dev->object_memory, 0);
28498 atomic_set(&dev->pin_count, 0);
28499- atomic_set(&dev->pin_memory, 0);
28500+ atomic_set_unchecked(&dev->pin_memory, 0);
28501 atomic_set(&dev->gtt_count, 0);
28502- atomic_set(&dev->gtt_memory, 0);
28503+ atomic_set_unchecked(&dev->gtt_memory, 0);
28504
28505 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28506 if (!mm) {
28507@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28508 goto fput;
28509 }
28510 atomic_inc(&dev->object_count);
28511- atomic_add(obj->size, &dev->object_memory);
28512+ atomic_add_unchecked(obj->size, &dev->object_memory);
28513 return obj;
28514 fput:
28515 fput(obj->filp);
28516@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28517
28518 fput(obj->filp);
28519 atomic_dec(&dev->object_count);
28520- atomic_sub(obj->size, &dev->object_memory);
28521+ atomic_sub_unchecked(obj->size, &dev->object_memory);
28522 kfree(obj);
28523 }
28524 EXPORT_SYMBOL(drm_gem_object_free);
28525diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_info.c linux-2.6.32.42/drivers/gpu/drm/drm_info.c
28526--- linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28527+++ linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28528@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28529 struct drm_local_map *map;
28530 struct drm_map_list *r_list;
28531
28532- /* Hardcoded from _DRM_FRAME_BUFFER,
28533- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28534- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28535- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28536+ static const char * const types[] = {
28537+ [_DRM_FRAME_BUFFER] = "FB",
28538+ [_DRM_REGISTERS] = "REG",
28539+ [_DRM_SHM] = "SHM",
28540+ [_DRM_AGP] = "AGP",
28541+ [_DRM_SCATTER_GATHER] = "SG",
28542+ [_DRM_CONSISTENT] = "PCI",
28543+ [_DRM_GEM] = "GEM" };
28544 const char *type;
28545 int i;
28546
28547@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28548 map = r_list->map;
28549 if (!map)
28550 continue;
28551- if (map->type < 0 || map->type > 5)
28552+ if (map->type >= ARRAY_SIZE(types))
28553 type = "??";
28554 else
28555 type = types[map->type];
28556@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28557 struct drm_device *dev = node->minor->dev;
28558
28559 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28560- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28561+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28562 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28563- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28564- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28565+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28566+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28567 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28568 return 0;
28569 }
28570@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28571 mutex_lock(&dev->struct_mutex);
28572 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28573 atomic_read(&dev->vma_count),
28574+#ifdef CONFIG_GRKERNSEC_HIDESYM
28575+ NULL, 0);
28576+#else
28577 high_memory, (u64)virt_to_phys(high_memory));
28578+#endif
28579
28580 list_for_each_entry(pt, &dev->vmalist, head) {
28581 vma = pt->vma;
28582@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28583 continue;
28584 seq_printf(m,
28585 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28586- pt->pid, vma->vm_start, vma->vm_end,
28587+ pt->pid,
28588+#ifdef CONFIG_GRKERNSEC_HIDESYM
28589+ 0, 0,
28590+#else
28591+ vma->vm_start, vma->vm_end,
28592+#endif
28593 vma->vm_flags & VM_READ ? 'r' : '-',
28594 vma->vm_flags & VM_WRITE ? 'w' : '-',
28595 vma->vm_flags & VM_EXEC ? 'x' : '-',
28596 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28597 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28598 vma->vm_flags & VM_IO ? 'i' : '-',
28599+#ifdef CONFIG_GRKERNSEC_HIDESYM
28600+ 0);
28601+#else
28602 vma->vm_pgoff);
28603+#endif
28604
28605 #if defined(__i386__)
28606 pgprot = pgprot_val(vma->vm_page_prot);
28607diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c
28608--- linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28609+++ linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28610@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
28611 stats->data[i].value =
28612 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28613 else
28614- stats->data[i].value = atomic_read(&dev->counts[i]);
28615+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28616 stats->data[i].type = dev->types[i];
28617 }
28618
28619diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_lock.c linux-2.6.32.42/drivers/gpu/drm/drm_lock.c
28620--- linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28621+++ linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28622@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28623 if (drm_lock_take(&master->lock, lock->context)) {
28624 master->lock.file_priv = file_priv;
28625 master->lock.lock_time = jiffies;
28626- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28627+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28628 break; /* Got lock */
28629 }
28630
28631@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28632 return -EINVAL;
28633 }
28634
28635- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28636+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28637
28638 /* kernel_context_switch isn't used by any of the x86 drm
28639 * modules but is required by the Sparc driver.
28640diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c
28641--- linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28642+++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28643@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28644 dma->buflist[vertex->idx],
28645 vertex->discard, vertex->used);
28646
28647- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28648- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28649+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28650+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28651 sarea_priv->last_enqueue = dev_priv->counter - 1;
28652 sarea_priv->last_dispatch = (int)hw_status[5];
28653
28654@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28655 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28656 mc->last_render);
28657
28658- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28659- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28660+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28661+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28662 sarea_priv->last_enqueue = dev_priv->counter - 1;
28663 sarea_priv->last_dispatch = (int)hw_status[5];
28664
28665diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h
28666--- linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28667+++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28668@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28669 int page_flipping;
28670
28671 wait_queue_head_t irq_queue;
28672- atomic_t irq_received;
28673- atomic_t irq_emitted;
28674+ atomic_unchecked_t irq_received;
28675+ atomic_unchecked_t irq_emitted;
28676
28677 int front_offset;
28678 } drm_i810_private_t;
28679diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h
28680--- linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28681+++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28682@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28683 int page_flipping;
28684
28685 wait_queue_head_t irq_queue;
28686- atomic_t irq_received;
28687- atomic_t irq_emitted;
28688+ atomic_unchecked_t irq_received;
28689+ atomic_unchecked_t irq_emitted;
28690
28691 int use_mi_batchbuffer_start;
28692
28693diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c
28694--- linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28695+++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28696@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28697
28698 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28699
28700- atomic_inc(&dev_priv->irq_received);
28701+ atomic_inc_unchecked(&dev_priv->irq_received);
28702 wake_up_interruptible(&dev_priv->irq_queue);
28703
28704 return IRQ_HANDLED;
28705@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28706
28707 DRM_DEBUG("%s\n", __func__);
28708
28709- atomic_inc(&dev_priv->irq_emitted);
28710+ atomic_inc_unchecked(&dev_priv->irq_emitted);
28711
28712 BEGIN_LP_RING(2);
28713 OUT_RING(0);
28714 OUT_RING(GFX_OP_USER_INTERRUPT);
28715 ADVANCE_LP_RING();
28716
28717- return atomic_read(&dev_priv->irq_emitted);
28718+ return atomic_read_unchecked(&dev_priv->irq_emitted);
28719 }
28720
28721 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28722@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28723
28724 DRM_DEBUG("%s\n", __func__);
28725
28726- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28727+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28728 return 0;
28729
28730 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28731@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28732
28733 for (;;) {
28734 __set_current_state(TASK_INTERRUPTIBLE);
28735- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28736+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28737 break;
28738 if ((signed)(end - jiffies) <= 0) {
28739 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28740@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28741 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28742 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28743 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28744- atomic_set(&dev_priv->irq_received, 0);
28745- atomic_set(&dev_priv->irq_emitted, 0);
28746+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28747+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28748 init_waitqueue_head(&dev_priv->irq_queue);
28749 }
28750
28751diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c
28752--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28753+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28754@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28755 }
28756 }
28757
28758-struct intel_dvo_dev_ops ch7017_ops = {
28759+const struct intel_dvo_dev_ops ch7017_ops = {
28760 .init = ch7017_init,
28761 .detect = ch7017_detect,
28762 .mode_valid = ch7017_mode_valid,
28763diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c
28764--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28765+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28766@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28767 }
28768 }
28769
28770-struct intel_dvo_dev_ops ch7xxx_ops = {
28771+const struct intel_dvo_dev_ops ch7xxx_ops = {
28772 .init = ch7xxx_init,
28773 .detect = ch7xxx_detect,
28774 .mode_valid = ch7xxx_mode_valid,
28775diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h
28776--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28777+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28778@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28779 *
28780 * \return singly-linked list of modes or NULL if no modes found.
28781 */
28782- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28783+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28784
28785 /**
28786 * Clean up driver-specific bits of the output
28787 */
28788- void (*destroy) (struct intel_dvo_device *dvo);
28789+ void (* const destroy) (struct intel_dvo_device *dvo);
28790
28791 /**
28792 * Debugging hook to dump device registers to log file
28793 */
28794- void (*dump_regs)(struct intel_dvo_device *dvo);
28795+ void (* const dump_regs)(struct intel_dvo_device *dvo);
28796 };
28797
28798-extern struct intel_dvo_dev_ops sil164_ops;
28799-extern struct intel_dvo_dev_ops ch7xxx_ops;
28800-extern struct intel_dvo_dev_ops ivch_ops;
28801-extern struct intel_dvo_dev_ops tfp410_ops;
28802-extern struct intel_dvo_dev_ops ch7017_ops;
28803+extern const struct intel_dvo_dev_ops sil164_ops;
28804+extern const struct intel_dvo_dev_ops ch7xxx_ops;
28805+extern const struct intel_dvo_dev_ops ivch_ops;
28806+extern const struct intel_dvo_dev_ops tfp410_ops;
28807+extern const struct intel_dvo_dev_ops ch7017_ops;
28808
28809 #endif /* _INTEL_DVO_H */
28810diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c
28811--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28812+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28813@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28814 }
28815 }
28816
28817-struct intel_dvo_dev_ops ivch_ops= {
28818+const struct intel_dvo_dev_ops ivch_ops= {
28819 .init = ivch_init,
28820 .dpms = ivch_dpms,
28821 .save = ivch_save,
28822diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c
28823--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28824+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28825@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28826 }
28827 }
28828
28829-struct intel_dvo_dev_ops sil164_ops = {
28830+const struct intel_dvo_dev_ops sil164_ops = {
28831 .init = sil164_init,
28832 .detect = sil164_detect,
28833 .mode_valid = sil164_mode_valid,
28834diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c
28835--- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28836+++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28837@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28838 }
28839 }
28840
28841-struct intel_dvo_dev_ops tfp410_ops = {
28842+const struct intel_dvo_dev_ops tfp410_ops = {
28843 .init = tfp410_init,
28844 .detect = tfp410_detect,
28845 .mode_valid = tfp410_mode_valid,
28846diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c
28847--- linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28848+++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28849@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28850 I915_READ(GTIMR));
28851 }
28852 seq_printf(m, "Interrupts received: %d\n",
28853- atomic_read(&dev_priv->irq_received));
28854+ atomic_read_unchecked(&dev_priv->irq_received));
28855 if (dev_priv->hw_status_page != NULL) {
28856 seq_printf(m, "Current sequence: %d\n",
28857 i915_get_gem_seqno(dev));
28858diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c
28859--- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28860+++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28861@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28862 return i915_resume(dev);
28863 }
28864
28865-static struct vm_operations_struct i915_gem_vm_ops = {
28866+static const struct vm_operations_struct i915_gem_vm_ops = {
28867 .fault = i915_gem_fault,
28868 .open = drm_gem_vm_open,
28869 .close = drm_gem_vm_close,
28870diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h
28871--- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28872+++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28873@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28874 int page_flipping;
28875
28876 wait_queue_head_t irq_queue;
28877- atomic_t irq_received;
28878+ atomic_unchecked_t irq_received;
28879 /** Protects user_irq_refcount and irq_mask_reg */
28880 spinlock_t user_irq_lock;
28881 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28882diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c
28883--- linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28884+++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28885@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28886
28887 args->aper_size = dev->gtt_total;
28888 args->aper_available_size = (args->aper_size -
28889- atomic_read(&dev->pin_memory));
28890+ atomic_read_unchecked(&dev->pin_memory));
28891
28892 return 0;
28893 }
28894@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28895 return -EINVAL;
28896 }
28897
28898+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28899+ drm_gem_object_unreference(obj);
28900+ return -EFAULT;
28901+ }
28902+
28903 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28904 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28905 } else {
28906@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28907 return -EINVAL;
28908 }
28909
28910+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28911+ drm_gem_object_unreference(obj);
28912+ return -EFAULT;
28913+ }
28914+
28915 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28916 * it would end up going through the fenced access, and we'll get
28917 * different detiling behavior between reading and writing.
28918@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28919
28920 if (obj_priv->gtt_space) {
28921 atomic_dec(&dev->gtt_count);
28922- atomic_sub(obj->size, &dev->gtt_memory);
28923+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28924
28925 drm_mm_put_block(obj_priv->gtt_space);
28926 obj_priv->gtt_space = NULL;
28927@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28928 goto search_free;
28929 }
28930 atomic_inc(&dev->gtt_count);
28931- atomic_add(obj->size, &dev->gtt_memory);
28932+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
28933
28934 /* Assert that the object is not currently in any GPU domain. As it
28935 * wasn't in the GTT, there shouldn't be any way it could have been in
28936@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28937 "%d/%d gtt bytes\n",
28938 atomic_read(&dev->object_count),
28939 atomic_read(&dev->pin_count),
28940- atomic_read(&dev->object_memory),
28941- atomic_read(&dev->pin_memory),
28942- atomic_read(&dev->gtt_memory),
28943+ atomic_read_unchecked(&dev->object_memory),
28944+ atomic_read_unchecked(&dev->pin_memory),
28945+ atomic_read_unchecked(&dev->gtt_memory),
28946 dev->gtt_total);
28947 }
28948 goto err;
28949@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28950 */
28951 if (obj_priv->pin_count == 1) {
28952 atomic_inc(&dev->pin_count);
28953- atomic_add(obj->size, &dev->pin_memory);
28954+ atomic_add_unchecked(obj->size, &dev->pin_memory);
28955 if (!obj_priv->active &&
28956 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28957 !list_empty(&obj_priv->list))
28958@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28959 list_move_tail(&obj_priv->list,
28960 &dev_priv->mm.inactive_list);
28961 atomic_dec(&dev->pin_count);
28962- atomic_sub(obj->size, &dev->pin_memory);
28963+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
28964 }
28965 i915_verify_inactive(dev, __FILE__, __LINE__);
28966 }
28967diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c
28968--- linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28969+++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28970@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28971 int irq_received;
28972 int ret = IRQ_NONE;
28973
28974- atomic_inc(&dev_priv->irq_received);
28975+ atomic_inc_unchecked(&dev_priv->irq_received);
28976
28977 if (IS_IGDNG(dev))
28978 return igdng_irq_handler(dev);
28979@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28980 {
28981 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28982
28983- atomic_set(&dev_priv->irq_received, 0);
28984+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28985
28986 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28987 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28988diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h
28989--- linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28990+++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28991@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28992 u32 clear_cmd;
28993 u32 maccess;
28994
28995- atomic_t vbl_received; /**< Number of vblanks received. */
28996+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28997 wait_queue_head_t fence_queue;
28998- atomic_t last_fence_retired;
28999+ atomic_unchecked_t last_fence_retired;
29000 u32 next_fence_to_post;
29001
29002 unsigned int fb_cpp;
29003diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c
29004--- linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29005+++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29006@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29007 if (crtc != 0)
29008 return 0;
29009
29010- return atomic_read(&dev_priv->vbl_received);
29011+ return atomic_read_unchecked(&dev_priv->vbl_received);
29012 }
29013
29014
29015@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29016 /* VBLANK interrupt */
29017 if (status & MGA_VLINEPEN) {
29018 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29019- atomic_inc(&dev_priv->vbl_received);
29020+ atomic_inc_unchecked(&dev_priv->vbl_received);
29021 drm_handle_vblank(dev, 0);
29022 handled = 1;
29023 }
29024@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29025 MGA_WRITE(MGA_PRIMEND, prim_end);
29026 }
29027
29028- atomic_inc(&dev_priv->last_fence_retired);
29029+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29030 DRM_WAKEUP(&dev_priv->fence_queue);
29031 handled = 1;
29032 }
29033@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29034 * using fences.
29035 */
29036 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29037- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29038+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29039 - *sequence) <= (1 << 23)));
29040
29041 *sequence = cur_fence;
29042diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c
29043--- linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29044+++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29045@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29046
29047 /* GH: Simple idle check.
29048 */
29049- atomic_set(&dev_priv->idle_count, 0);
29050+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29051
29052 /* We don't support anything other than bus-mastering ring mode,
29053 * but the ring can be in either AGP or PCI space for the ring
29054diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h
29055--- linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29056+++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29057@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29058 int is_pci;
29059 unsigned long cce_buffers_offset;
29060
29061- atomic_t idle_count;
29062+ atomic_unchecked_t idle_count;
29063
29064 int page_flipping;
29065 int current_page;
29066 u32 crtc_offset;
29067 u32 crtc_offset_cntl;
29068
29069- atomic_t vbl_received;
29070+ atomic_unchecked_t vbl_received;
29071
29072 u32 color_fmt;
29073 unsigned int front_offset;
29074diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c
29075--- linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29076+++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29077@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29078 if (crtc != 0)
29079 return 0;
29080
29081- return atomic_read(&dev_priv->vbl_received);
29082+ return atomic_read_unchecked(&dev_priv->vbl_received);
29083 }
29084
29085 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29086@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29087 /* VBLANK interrupt */
29088 if (status & R128_CRTC_VBLANK_INT) {
29089 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29090- atomic_inc(&dev_priv->vbl_received);
29091+ atomic_inc_unchecked(&dev_priv->vbl_received);
29092 drm_handle_vblank(dev, 0);
29093 return IRQ_HANDLED;
29094 }
29095diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c
29096--- linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29097+++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29098@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29099
29100 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29101 {
29102- if (atomic_read(&dev_priv->idle_count) == 0) {
29103+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29104 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29105 } else {
29106- atomic_set(&dev_priv->idle_count, 0);
29107+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29108 }
29109 }
29110
29111diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c
29112--- linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29113+++ linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29114@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29115 char name[512];
29116 int i;
29117
29118+ pax_track_stack();
29119+
29120 ctx->card = card;
29121 ctx->bios = bios;
29122
29123diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c
29124--- linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29125+++ linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29126@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29127 regex_t mask_rex;
29128 regmatch_t match[4];
29129 char buf[1024];
29130- size_t end;
29131+ long end;
29132 int len;
29133 int done = 0;
29134 int r;
29135 unsigned o;
29136 struct offset *offset;
29137 char last_reg_s[10];
29138- int last_reg;
29139+ unsigned long last_reg;
29140
29141 if (regcomp
29142 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29143diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c
29144--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29145+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29146@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29147 bool linkb;
29148 struct radeon_i2c_bus_rec ddc_bus;
29149
29150+ pax_track_stack();
29151+
29152 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29153
29154 if (data_offset == 0)
29155@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29156 }
29157 }
29158
29159-struct bios_connector {
29160+static struct bios_connector {
29161 bool valid;
29162 uint16_t line_mux;
29163 uint16_t devices;
29164 int connector_type;
29165 struct radeon_i2c_bus_rec ddc_bus;
29166-};
29167+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29168
29169 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29170 drm_device
29171@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29172 uint8_t dac;
29173 union atom_supported_devices *supported_devices;
29174 int i, j;
29175- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29176
29177 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29178
29179diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c
29180--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29181+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29182@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29183
29184 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29185 error = freq - current_freq;
29186- error = error < 0 ? 0xffffffff : error;
29187+ error = (int32_t)error < 0 ? 0xffffffff : error;
29188 } else
29189 error = abs(current_freq - freq);
29190 vco_diff = abs(vco - best_vco);
29191diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h
29192--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29193+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29194@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29195
29196 /* SW interrupt */
29197 wait_queue_head_t swi_queue;
29198- atomic_t swi_emitted;
29199+ atomic_unchecked_t swi_emitted;
29200 int vblank_crtc;
29201 uint32_t irq_enable_reg;
29202 uint32_t r500_disp_irq_reg;
29203diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c
29204--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29205+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29206@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29207 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29208 return 0;
29209 }
29210- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29211+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29212 if (!rdev->cp.ready) {
29213 /* FIXME: cp is not running assume everythings is done right
29214 * away
29215@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29216 return r;
29217 }
29218 WREG32(rdev->fence_drv.scratch_reg, 0);
29219- atomic_set(&rdev->fence_drv.seq, 0);
29220+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29221 INIT_LIST_HEAD(&rdev->fence_drv.created);
29222 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29223 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29224diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h
29225--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29226+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
29227@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29228 */
29229 struct radeon_fence_driver {
29230 uint32_t scratch_reg;
29231- atomic_t seq;
29232+ atomic_unchecked_t seq;
29233 uint32_t last_seq;
29234 unsigned long count_timeout;
29235 wait_queue_head_t queue;
29236diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c
29237--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29238+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29239@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29240 request = compat_alloc_user_space(sizeof(*request));
29241 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29242 || __put_user(req32.param, &request->param)
29243- || __put_user((void __user *)(unsigned long)req32.value,
29244+ || __put_user((unsigned long)req32.value,
29245 &request->value))
29246 return -EFAULT;
29247
29248diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c
29249--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29250+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29251@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29252 unsigned int ret;
29253 RING_LOCALS;
29254
29255- atomic_inc(&dev_priv->swi_emitted);
29256- ret = atomic_read(&dev_priv->swi_emitted);
29257+ atomic_inc_unchecked(&dev_priv->swi_emitted);
29258+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29259
29260 BEGIN_RING(4);
29261 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29262@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29263 drm_radeon_private_t *dev_priv =
29264 (drm_radeon_private_t *) dev->dev_private;
29265
29266- atomic_set(&dev_priv->swi_emitted, 0);
29267+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29268 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29269
29270 dev->max_vblank_count = 0x001fffff;
29271diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c
29272--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29273+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29274@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29275 {
29276 drm_radeon_private_t *dev_priv = dev->dev_private;
29277 drm_radeon_getparam_t *param = data;
29278- int value;
29279+ int value = 0;
29280
29281 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29282
29283diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c
29284--- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29285+++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29286@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29287 DRM_INFO("radeon: ttm finalized\n");
29288 }
29289
29290-static struct vm_operations_struct radeon_ttm_vm_ops;
29291-static const struct vm_operations_struct *ttm_vm_ops = NULL;
29292-
29293-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29294-{
29295- struct ttm_buffer_object *bo;
29296- int r;
29297-
29298- bo = (struct ttm_buffer_object *)vma->vm_private_data;
29299- if (bo == NULL) {
29300- return VM_FAULT_NOPAGE;
29301- }
29302- r = ttm_vm_ops->fault(vma, vmf);
29303- return r;
29304-}
29305-
29306 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29307 {
29308 struct drm_file *file_priv;
29309 struct radeon_device *rdev;
29310- int r;
29311
29312 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29313 return drm_mmap(filp, vma);
29314@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29315
29316 file_priv = (struct drm_file *)filp->private_data;
29317 rdev = file_priv->minor->dev->dev_private;
29318- if (rdev == NULL) {
29319+ if (!rdev)
29320 return -EINVAL;
29321- }
29322- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29323- if (unlikely(r != 0)) {
29324- return r;
29325- }
29326- if (unlikely(ttm_vm_ops == NULL)) {
29327- ttm_vm_ops = vma->vm_ops;
29328- radeon_ttm_vm_ops = *ttm_vm_ops;
29329- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29330- }
29331- vma->vm_ops = &radeon_ttm_vm_ops;
29332- return 0;
29333+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29334 }
29335
29336
29337diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c
29338--- linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29339+++ linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29340@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29341 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29342 rdev->pm.sideport_bandwidth.full)
29343 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29344- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29345+ read_delay_latency.full = rfixed_const(800 * 1000);
29346 read_delay_latency.full = rfixed_div(read_delay_latency,
29347 rdev->pm.igp_sideport_mclk);
29348+ a.full = rfixed_const(370);
29349+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29350 } else {
29351 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29352 rdev->pm.k8_bandwidth.full)
29353diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c
29354--- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29355+++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29356@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29357 NULL
29358 };
29359
29360-static struct sysfs_ops ttm_bo_global_ops = {
29361+static const struct sysfs_ops ttm_bo_global_ops = {
29362 .show = &ttm_bo_global_show
29363 };
29364
29365diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c
29366--- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29367+++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29368@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29369 {
29370 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29371 vma->vm_private_data;
29372- struct ttm_bo_device *bdev = bo->bdev;
29373+ struct ttm_bo_device *bdev;
29374 unsigned long bus_base;
29375 unsigned long bus_offset;
29376 unsigned long bus_size;
29377@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29378 unsigned long address = (unsigned long)vmf->virtual_address;
29379 int retval = VM_FAULT_NOPAGE;
29380
29381+ if (!bo)
29382+ return VM_FAULT_NOPAGE;
29383+ bdev = bo->bdev;
29384+
29385 /*
29386 * Work around locking order reversal in fault / nopfn
29387 * between mmap_sem and bo_reserve: Perform a trylock operation
29388diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c
29389--- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29390+++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29391@@ -36,7 +36,7 @@
29392 struct ttm_global_item {
29393 struct mutex mutex;
29394 void *object;
29395- int refcount;
29396+ atomic_t refcount;
29397 };
29398
29399 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29400@@ -49,7 +49,7 @@ void ttm_global_init(void)
29401 struct ttm_global_item *item = &glob[i];
29402 mutex_init(&item->mutex);
29403 item->object = NULL;
29404- item->refcount = 0;
29405+ atomic_set(&item->refcount, 0);
29406 }
29407 }
29408
29409@@ -59,7 +59,7 @@ void ttm_global_release(void)
29410 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29411 struct ttm_global_item *item = &glob[i];
29412 BUG_ON(item->object != NULL);
29413- BUG_ON(item->refcount != 0);
29414+ BUG_ON(atomic_read(&item->refcount) != 0);
29415 }
29416 }
29417
29418@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29419 void *object;
29420
29421 mutex_lock(&item->mutex);
29422- if (item->refcount == 0) {
29423+ if (atomic_read(&item->refcount) == 0) {
29424 item->object = kzalloc(ref->size, GFP_KERNEL);
29425 if (unlikely(item->object == NULL)) {
29426 ret = -ENOMEM;
29427@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29428 goto out_err;
29429
29430 }
29431- ++item->refcount;
29432+ atomic_inc(&item->refcount);
29433 ref->object = item->object;
29434 object = item->object;
29435 mutex_unlock(&item->mutex);
29436@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29437 struct ttm_global_item *item = &glob[ref->global_type];
29438
29439 mutex_lock(&item->mutex);
29440- BUG_ON(item->refcount == 0);
29441+ BUG_ON(atomic_read(&item->refcount) == 0);
29442 BUG_ON(ref->object != item->object);
29443- if (--item->refcount == 0) {
29444+ if (atomic_dec_and_test(&item->refcount)) {
29445 ref->release(ref);
29446 item->object = NULL;
29447 }
29448diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c
29449--- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29450+++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29451@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29452 NULL
29453 };
29454
29455-static struct sysfs_ops ttm_mem_zone_ops = {
29456+static const struct sysfs_ops ttm_mem_zone_ops = {
29457 .show = &ttm_mem_zone_show,
29458 .store = &ttm_mem_zone_store
29459 };
29460diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h
29461--- linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29462+++ linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29463@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29464 typedef uint32_t maskarray_t[5];
29465
29466 typedef struct drm_via_irq {
29467- atomic_t irq_received;
29468+ atomic_unchecked_t irq_received;
29469 uint32_t pending_mask;
29470 uint32_t enable_mask;
29471 wait_queue_head_t irq_queue;
29472@@ -75,7 +75,7 @@ typedef struct drm_via_private {
29473 struct timeval last_vblank;
29474 int last_vblank_valid;
29475 unsigned usec_per_vblank;
29476- atomic_t vbl_received;
29477+ atomic_unchecked_t vbl_received;
29478 drm_via_state_t hc_state;
29479 char pci_buf[VIA_PCI_BUF_SIZE];
29480 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29481diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c
29482--- linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29483+++ linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29484@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29485 if (crtc != 0)
29486 return 0;
29487
29488- return atomic_read(&dev_priv->vbl_received);
29489+ return atomic_read_unchecked(&dev_priv->vbl_received);
29490 }
29491
29492 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29493@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29494
29495 status = VIA_READ(VIA_REG_INTERRUPT);
29496 if (status & VIA_IRQ_VBLANK_PENDING) {
29497- atomic_inc(&dev_priv->vbl_received);
29498- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29499+ atomic_inc_unchecked(&dev_priv->vbl_received);
29500+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29501 do_gettimeofday(&cur_vblank);
29502 if (dev_priv->last_vblank_valid) {
29503 dev_priv->usec_per_vblank =
29504@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29505 dev_priv->last_vblank = cur_vblank;
29506 dev_priv->last_vblank_valid = 1;
29507 }
29508- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29509+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29510 DRM_DEBUG("US per vblank is: %u\n",
29511 dev_priv->usec_per_vblank);
29512 }
29513@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29514
29515 for (i = 0; i < dev_priv->num_irqs; ++i) {
29516 if (status & cur_irq->pending_mask) {
29517- atomic_inc(&cur_irq->irq_received);
29518+ atomic_inc_unchecked(&cur_irq->irq_received);
29519 DRM_WAKEUP(&cur_irq->irq_queue);
29520 handled = 1;
29521 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29522@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29523 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29524 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29525 masks[irq][4]));
29526- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29527+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29528 } else {
29529 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29530 (((cur_irq_sequence =
29531- atomic_read(&cur_irq->irq_received)) -
29532+ atomic_read_unchecked(&cur_irq->irq_received)) -
29533 *sequence) <= (1 << 23)));
29534 }
29535 *sequence = cur_irq_sequence;
29536@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29537 }
29538
29539 for (i = 0; i < dev_priv->num_irqs; ++i) {
29540- atomic_set(&cur_irq->irq_received, 0);
29541+ atomic_set_unchecked(&cur_irq->irq_received, 0);
29542 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29543 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29544 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29545@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29546 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29547 case VIA_IRQ_RELATIVE:
29548 irqwait->request.sequence +=
29549- atomic_read(&cur_irq->irq_received);
29550+ atomic_read_unchecked(&cur_irq->irq_received);
29551 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29552 case VIA_IRQ_ABSOLUTE:
29553 break;
29554diff -urNp linux-2.6.32.42/drivers/hid/hid-core.c linux-2.6.32.42/drivers/hid/hid-core.c
29555--- linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29556+++ linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29557@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29558
29559 int hid_add_device(struct hid_device *hdev)
29560 {
29561- static atomic_t id = ATOMIC_INIT(0);
29562+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29563 int ret;
29564
29565 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29566@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29567 /* XXX hack, any other cleaner solution after the driver core
29568 * is converted to allow more than 20 bytes as the device name? */
29569 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29570- hdev->vendor, hdev->product, atomic_inc_return(&id));
29571+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29572
29573 ret = device_add(&hdev->dev);
29574 if (!ret)
29575diff -urNp linux-2.6.32.42/drivers/hid/usbhid/hiddev.c linux-2.6.32.42/drivers/hid/usbhid/hiddev.c
29576--- linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
29577+++ linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
29578@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
29579 return put_user(HID_VERSION, (int __user *)arg);
29580
29581 case HIDIOCAPPLICATION:
29582- if (arg < 0 || arg >= hid->maxapplication)
29583+ if (arg >= hid->maxapplication)
29584 return -EINVAL;
29585
29586 for (i = 0; i < hid->maxcollection; i++)
29587diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.c linux-2.6.32.42/drivers/hwmon/lis3lv02d.c
29588--- linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
29589+++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
29590@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
29591 * the lid is closed. This leads to interrupts as soon as a little move
29592 * is done.
29593 */
29594- atomic_inc(&lis3_dev.count);
29595+ atomic_inc_unchecked(&lis3_dev.count);
29596
29597 wake_up_interruptible(&lis3_dev.misc_wait);
29598 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29599@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
29600 if (test_and_set_bit(0, &lis3_dev.misc_opened))
29601 return -EBUSY; /* already open */
29602
29603- atomic_set(&lis3_dev.count, 0);
29604+ atomic_set_unchecked(&lis3_dev.count, 0);
29605
29606 /*
29607 * The sensor can generate interrupts for free-fall and direction
29608@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
29609 add_wait_queue(&lis3_dev.misc_wait, &wait);
29610 while (true) {
29611 set_current_state(TASK_INTERRUPTIBLE);
29612- data = atomic_xchg(&lis3_dev.count, 0);
29613+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29614 if (data)
29615 break;
29616
29617@@ -244,7 +244,7 @@ out:
29618 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29619 {
29620 poll_wait(file, &lis3_dev.misc_wait, wait);
29621- if (atomic_read(&lis3_dev.count))
29622+ if (atomic_read_unchecked(&lis3_dev.count))
29623 return POLLIN | POLLRDNORM;
29624 return 0;
29625 }
29626diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.h linux-2.6.32.42/drivers/hwmon/lis3lv02d.h
29627--- linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29628+++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29629@@ -201,7 +201,7 @@ struct lis3lv02d {
29630
29631 struct input_polled_dev *idev; /* input device */
29632 struct platform_device *pdev; /* platform device */
29633- atomic_t count; /* interrupt count after last read */
29634+ atomic_unchecked_t count; /* interrupt count after last read */
29635 int xcalib; /* calibrated null value for x */
29636 int ycalib; /* calibrated null value for y */
29637 int zcalib; /* calibrated null value for z */
29638diff -urNp linux-2.6.32.42/drivers/hwmon/sht15.c linux-2.6.32.42/drivers/hwmon/sht15.c
29639--- linux-2.6.32.42/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29640+++ linux-2.6.32.42/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29641@@ -112,7 +112,7 @@ struct sht15_data {
29642 int supply_uV;
29643 int supply_uV_valid;
29644 struct work_struct update_supply_work;
29645- atomic_t interrupt_handled;
29646+ atomic_unchecked_t interrupt_handled;
29647 };
29648
29649 /**
29650@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29651 return ret;
29652
29653 gpio_direction_input(data->pdata->gpio_data);
29654- atomic_set(&data->interrupt_handled, 0);
29655+ atomic_set_unchecked(&data->interrupt_handled, 0);
29656
29657 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29658 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29659 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29660 /* Only relevant if the interrupt hasn't occured. */
29661- if (!atomic_read(&data->interrupt_handled))
29662+ if (!atomic_read_unchecked(&data->interrupt_handled))
29663 schedule_work(&data->read_work);
29664 }
29665 ret = wait_event_timeout(data->wait_queue,
29666@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29667 struct sht15_data *data = d;
29668 /* First disable the interrupt */
29669 disable_irq_nosync(irq);
29670- atomic_inc(&data->interrupt_handled);
29671+ atomic_inc_unchecked(&data->interrupt_handled);
29672 /* Then schedule a reading work struct */
29673 if (data->flag != SHT15_READING_NOTHING)
29674 schedule_work(&data->read_work);
29675@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29676 here as could have gone low in meantime so verify
29677 it hasn't!
29678 */
29679- atomic_set(&data->interrupt_handled, 0);
29680+ atomic_set_unchecked(&data->interrupt_handled, 0);
29681 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29682 /* If still not occured or another handler has been scheduled */
29683 if (gpio_get_value(data->pdata->gpio_data)
29684- || atomic_read(&data->interrupt_handled))
29685+ || atomic_read_unchecked(&data->interrupt_handled))
29686 return;
29687 }
29688 /* Read the data back from the device */
29689diff -urNp linux-2.6.32.42/drivers/hwmon/w83791d.c linux-2.6.32.42/drivers/hwmon/w83791d.c
29690--- linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29691+++ linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29692@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29693 struct i2c_board_info *info);
29694 static int w83791d_remove(struct i2c_client *client);
29695
29696-static int w83791d_read(struct i2c_client *client, u8 register);
29697-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29698+static int w83791d_read(struct i2c_client *client, u8 reg);
29699+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29700 static struct w83791d_data *w83791d_update_device(struct device *dev);
29701
29702 #ifdef DEBUG
29703diff -urNp linux-2.6.32.42/drivers/ide/ide-cd.c linux-2.6.32.42/drivers/ide/ide-cd.c
29704--- linux-2.6.32.42/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29705+++ linux-2.6.32.42/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29706@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29707 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29708 if ((unsigned long)buf & alignment
29709 || blk_rq_bytes(rq) & q->dma_pad_mask
29710- || object_is_on_stack(buf))
29711+ || object_starts_on_stack(buf))
29712 drive->dma = 0;
29713 }
29714 }
29715diff -urNp linux-2.6.32.42/drivers/ide/ide-floppy.c linux-2.6.32.42/drivers/ide/ide-floppy.c
29716--- linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29717+++ linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29718@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29719 u8 pc_buf[256], header_len, desc_cnt;
29720 int i, rc = 1, blocks, length;
29721
29722+ pax_track_stack();
29723+
29724 ide_debug_log(IDE_DBG_FUNC, "enter");
29725
29726 drive->bios_cyl = 0;
29727diff -urNp linux-2.6.32.42/drivers/ide/setup-pci.c linux-2.6.32.42/drivers/ide/setup-pci.c
29728--- linux-2.6.32.42/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29729+++ linux-2.6.32.42/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29730@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29731 int ret, i, n_ports = dev2 ? 4 : 2;
29732 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29733
29734+ pax_track_stack();
29735+
29736 for (i = 0; i < n_ports / 2; i++) {
29737 ret = ide_setup_pci_controller(pdev[i], d, !i);
29738 if (ret < 0)
29739diff -urNp linux-2.6.32.42/drivers/ieee1394/dv1394.c linux-2.6.32.42/drivers/ieee1394/dv1394.c
29740--- linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29741+++ linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29742@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29743 based upon DIF section and sequence
29744 */
29745
29746-static void inline
29747+static inline void
29748 frame_put_packet (struct frame *f, struct packet *p)
29749 {
29750 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29751diff -urNp linux-2.6.32.42/drivers/ieee1394/hosts.c linux-2.6.32.42/drivers/ieee1394/hosts.c
29752--- linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29753+++ linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29754@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29755 }
29756
29757 static struct hpsb_host_driver dummy_driver = {
29758+ .name = "dummy",
29759 .transmit_packet = dummy_transmit_packet,
29760 .devctl = dummy_devctl,
29761 .isoctl = dummy_isoctl
29762diff -urNp linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c
29763--- linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29764+++ linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29765@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29766 for (func = 0; func < 8; func++) {
29767 u32 class = read_pci_config(num,slot,func,
29768 PCI_CLASS_REVISION);
29769- if ((class == 0xffffffff))
29770+ if (class == 0xffffffff)
29771 continue; /* No device at this func */
29772
29773 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29774diff -urNp linux-2.6.32.42/drivers/ieee1394/ohci1394.c linux-2.6.32.42/drivers/ieee1394/ohci1394.c
29775--- linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29776+++ linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29777@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29778 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29779
29780 /* Module Parameters */
29781-static int phys_dma = 1;
29782+static int phys_dma;
29783 module_param(phys_dma, int, 0444);
29784-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29785+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29786
29787 static void dma_trm_tasklet(unsigned long data);
29788 static void dma_trm_reset(struct dma_trm_ctx *d);
29789diff -urNp linux-2.6.32.42/drivers/ieee1394/sbp2.c linux-2.6.32.42/drivers/ieee1394/sbp2.c
29790--- linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29791+++ linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29792@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29793 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29794 MODULE_LICENSE("GPL");
29795
29796-static int sbp2_module_init(void)
29797+static int __init sbp2_module_init(void)
29798 {
29799 int ret;
29800
29801diff -urNp linux-2.6.32.42/drivers/infiniband/core/cm.c linux-2.6.32.42/drivers/infiniband/core/cm.c
29802--- linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29803+++ linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29804@@ -112,7 +112,7 @@ static char const counter_group_names[CM
29805
29806 struct cm_counter_group {
29807 struct kobject obj;
29808- atomic_long_t counter[CM_ATTR_COUNT];
29809+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29810 };
29811
29812 struct cm_counter_attribute {
29813@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29814 struct ib_mad_send_buf *msg = NULL;
29815 int ret;
29816
29817- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29818+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29819 counter[CM_REQ_COUNTER]);
29820
29821 /* Quick state check to discard duplicate REQs. */
29822@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29823 if (!cm_id_priv)
29824 return;
29825
29826- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29827+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29828 counter[CM_REP_COUNTER]);
29829 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29830 if (ret)
29831@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29832 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29833 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29834 spin_unlock_irq(&cm_id_priv->lock);
29835- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29836+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29837 counter[CM_RTU_COUNTER]);
29838 goto out;
29839 }
29840@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29841 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29842 dreq_msg->local_comm_id);
29843 if (!cm_id_priv) {
29844- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29845+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29846 counter[CM_DREQ_COUNTER]);
29847 cm_issue_drep(work->port, work->mad_recv_wc);
29848 return -EINVAL;
29849@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29850 case IB_CM_MRA_REP_RCVD:
29851 break;
29852 case IB_CM_TIMEWAIT:
29853- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29854+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29855 counter[CM_DREQ_COUNTER]);
29856 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29857 goto unlock;
29858@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29859 cm_free_msg(msg);
29860 goto deref;
29861 case IB_CM_DREQ_RCVD:
29862- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29863+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29864 counter[CM_DREQ_COUNTER]);
29865 goto unlock;
29866 default:
29867@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29868 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29869 cm_id_priv->msg, timeout)) {
29870 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29871- atomic_long_inc(&work->port->
29872+ atomic_long_inc_unchecked(&work->port->
29873 counter_group[CM_RECV_DUPLICATES].
29874 counter[CM_MRA_COUNTER]);
29875 goto out;
29876@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29877 break;
29878 case IB_CM_MRA_REQ_RCVD:
29879 case IB_CM_MRA_REP_RCVD:
29880- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29881+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29882 counter[CM_MRA_COUNTER]);
29883 /* fall through */
29884 default:
29885@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29886 case IB_CM_LAP_IDLE:
29887 break;
29888 case IB_CM_MRA_LAP_SENT:
29889- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29890+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29891 counter[CM_LAP_COUNTER]);
29892 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29893 goto unlock;
29894@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29895 cm_free_msg(msg);
29896 goto deref;
29897 case IB_CM_LAP_RCVD:
29898- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29899+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29900 counter[CM_LAP_COUNTER]);
29901 goto unlock;
29902 default:
29903@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29904 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29905 if (cur_cm_id_priv) {
29906 spin_unlock_irq(&cm.lock);
29907- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29908+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29909 counter[CM_SIDR_REQ_COUNTER]);
29910 goto out; /* Duplicate message. */
29911 }
29912@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29913 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29914 msg->retries = 1;
29915
29916- atomic_long_add(1 + msg->retries,
29917+ atomic_long_add_unchecked(1 + msg->retries,
29918 &port->counter_group[CM_XMIT].counter[attr_index]);
29919 if (msg->retries)
29920- atomic_long_add(msg->retries,
29921+ atomic_long_add_unchecked(msg->retries,
29922 &port->counter_group[CM_XMIT_RETRIES].
29923 counter[attr_index]);
29924
29925@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29926 }
29927
29928 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29929- atomic_long_inc(&port->counter_group[CM_RECV].
29930+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29931 counter[attr_id - CM_ATTR_ID_OFFSET]);
29932
29933 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29934@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29935 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29936
29937 return sprintf(buf, "%ld\n",
29938- atomic_long_read(&group->counter[cm_attr->index]));
29939+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29940 }
29941
29942-static struct sysfs_ops cm_counter_ops = {
29943+static const struct sysfs_ops cm_counter_ops = {
29944 .show = cm_show_counter
29945 };
29946
29947diff -urNp linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c
29948--- linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29949+++ linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29950@@ -97,8 +97,8 @@ struct ib_fmr_pool {
29951
29952 struct task_struct *thread;
29953
29954- atomic_t req_ser;
29955- atomic_t flush_ser;
29956+ atomic_unchecked_t req_ser;
29957+ atomic_unchecked_t flush_ser;
29958
29959 wait_queue_head_t force_wait;
29960 };
29961@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29962 struct ib_fmr_pool *pool = pool_ptr;
29963
29964 do {
29965- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29966+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29967 ib_fmr_batch_release(pool);
29968
29969- atomic_inc(&pool->flush_ser);
29970+ atomic_inc_unchecked(&pool->flush_ser);
29971 wake_up_interruptible(&pool->force_wait);
29972
29973 if (pool->flush_function)
29974@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29975 }
29976
29977 set_current_state(TASK_INTERRUPTIBLE);
29978- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29979+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29980 !kthread_should_stop())
29981 schedule();
29982 __set_current_state(TASK_RUNNING);
29983@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29984 pool->dirty_watermark = params->dirty_watermark;
29985 pool->dirty_len = 0;
29986 spin_lock_init(&pool->pool_lock);
29987- atomic_set(&pool->req_ser, 0);
29988- atomic_set(&pool->flush_ser, 0);
29989+ atomic_set_unchecked(&pool->req_ser, 0);
29990+ atomic_set_unchecked(&pool->flush_ser, 0);
29991 init_waitqueue_head(&pool->force_wait);
29992
29993 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29994@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29995 }
29996 spin_unlock_irq(&pool->pool_lock);
29997
29998- serial = atomic_inc_return(&pool->req_ser);
29999+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30000 wake_up_process(pool->thread);
30001
30002 if (wait_event_interruptible(pool->force_wait,
30003- atomic_read(&pool->flush_ser) - serial >= 0))
30004+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30005 return -EINTR;
30006
30007 return 0;
30008@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30009 } else {
30010 list_add_tail(&fmr->list, &pool->dirty_list);
30011 if (++pool->dirty_len >= pool->dirty_watermark) {
30012- atomic_inc(&pool->req_ser);
30013+ atomic_inc_unchecked(&pool->req_ser);
30014 wake_up_process(pool->thread);
30015 }
30016 }
30017diff -urNp linux-2.6.32.42/drivers/infiniband/core/sysfs.c linux-2.6.32.42/drivers/infiniband/core/sysfs.c
30018--- linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30019+++ linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30020@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30021 return port_attr->show(p, port_attr, buf);
30022 }
30023
30024-static struct sysfs_ops port_sysfs_ops = {
30025+static const struct sysfs_ops port_sysfs_ops = {
30026 .show = port_attr_show
30027 };
30028
30029diff -urNp linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c
30030--- linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30031+++ linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30032@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30033 dst->grh.sgid_index = src->grh.sgid_index;
30034 dst->grh.hop_limit = src->grh.hop_limit;
30035 dst->grh.traffic_class = src->grh.traffic_class;
30036+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30037 dst->dlid = src->dlid;
30038 dst->sl = src->sl;
30039 dst->src_path_bits = src->src_path_bits;
30040 dst->static_rate = src->static_rate;
30041 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30042 dst->port_num = src->port_num;
30043+ dst->reserved = 0;
30044 }
30045 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30046
30047 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30048 struct ib_qp_attr *src)
30049 {
30050+ dst->qp_state = src->qp_state;
30051 dst->cur_qp_state = src->cur_qp_state;
30052 dst->path_mtu = src->path_mtu;
30053 dst->path_mig_state = src->path_mig_state;
30054@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30055 dst->rnr_retry = src->rnr_retry;
30056 dst->alt_port_num = src->alt_port_num;
30057 dst->alt_timeout = src->alt_timeout;
30058+ memset(dst->reserved, 0, sizeof(dst->reserved));
30059 }
30060 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30061
30062diff -urNp linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c
30063--- linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30064+++ linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30065@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30066 struct infinipath_counters counters;
30067 struct ipath_devdata *dd;
30068
30069+ pax_track_stack();
30070+
30071 dd = file->f_path.dentry->d_inode->i_private;
30072 dd->ipath_f_read_counters(dd, &counters);
30073
30074diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c
30075--- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30076+++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30077@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30078 LIST_HEAD(nes_adapter_list);
30079 static LIST_HEAD(nes_dev_list);
30080
30081-atomic_t qps_destroyed;
30082+atomic_unchecked_t qps_destroyed;
30083
30084 static unsigned int ee_flsh_adapter;
30085 static unsigned int sysfs_nonidx_addr;
30086@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30087 struct nes_adapter *nesadapter = nesdev->nesadapter;
30088 u32 qp_id;
30089
30090- atomic_inc(&qps_destroyed);
30091+ atomic_inc_unchecked(&qps_destroyed);
30092
30093 /* Free the control structures */
30094
30095diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c
30096--- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30097+++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30098@@ -69,11 +69,11 @@ u32 cm_packets_received;
30099 u32 cm_listens_created;
30100 u32 cm_listens_destroyed;
30101 u32 cm_backlog_drops;
30102-atomic_t cm_loopbacks;
30103-atomic_t cm_nodes_created;
30104-atomic_t cm_nodes_destroyed;
30105-atomic_t cm_accel_dropped_pkts;
30106-atomic_t cm_resets_recvd;
30107+atomic_unchecked_t cm_loopbacks;
30108+atomic_unchecked_t cm_nodes_created;
30109+atomic_unchecked_t cm_nodes_destroyed;
30110+atomic_unchecked_t cm_accel_dropped_pkts;
30111+atomic_unchecked_t cm_resets_recvd;
30112
30113 static inline int mini_cm_accelerated(struct nes_cm_core *,
30114 struct nes_cm_node *);
30115@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30116
30117 static struct nes_cm_core *g_cm_core;
30118
30119-atomic_t cm_connects;
30120-atomic_t cm_accepts;
30121-atomic_t cm_disconnects;
30122-atomic_t cm_closes;
30123-atomic_t cm_connecteds;
30124-atomic_t cm_connect_reqs;
30125-atomic_t cm_rejects;
30126+atomic_unchecked_t cm_connects;
30127+atomic_unchecked_t cm_accepts;
30128+atomic_unchecked_t cm_disconnects;
30129+atomic_unchecked_t cm_closes;
30130+atomic_unchecked_t cm_connecteds;
30131+atomic_unchecked_t cm_connect_reqs;
30132+atomic_unchecked_t cm_rejects;
30133
30134
30135 /**
30136@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30137 cm_node->rem_mac);
30138
30139 add_hte_node(cm_core, cm_node);
30140- atomic_inc(&cm_nodes_created);
30141+ atomic_inc_unchecked(&cm_nodes_created);
30142
30143 return cm_node;
30144 }
30145@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30146 }
30147
30148 atomic_dec(&cm_core->node_cnt);
30149- atomic_inc(&cm_nodes_destroyed);
30150+ atomic_inc_unchecked(&cm_nodes_destroyed);
30151 nesqp = cm_node->nesqp;
30152 if (nesqp) {
30153 nesqp->cm_node = NULL;
30154@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30155
30156 static void drop_packet(struct sk_buff *skb)
30157 {
30158- atomic_inc(&cm_accel_dropped_pkts);
30159+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30160 dev_kfree_skb_any(skb);
30161 }
30162
30163@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30164
30165 int reset = 0; /* whether to send reset in case of err.. */
30166 int passive_state;
30167- atomic_inc(&cm_resets_recvd);
30168+ atomic_inc_unchecked(&cm_resets_recvd);
30169 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30170 " refcnt=%d\n", cm_node, cm_node->state,
30171 atomic_read(&cm_node->ref_count));
30172@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30173 rem_ref_cm_node(cm_node->cm_core, cm_node);
30174 return NULL;
30175 }
30176- atomic_inc(&cm_loopbacks);
30177+ atomic_inc_unchecked(&cm_loopbacks);
30178 loopbackremotenode->loopbackpartner = cm_node;
30179 loopbackremotenode->tcp_cntxt.rcv_wscale =
30180 NES_CM_DEFAULT_RCV_WND_SCALE;
30181@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30182 add_ref_cm_node(cm_node);
30183 } else if (cm_node->state == NES_CM_STATE_TSA) {
30184 rem_ref_cm_node(cm_core, cm_node);
30185- atomic_inc(&cm_accel_dropped_pkts);
30186+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30187 dev_kfree_skb_any(skb);
30188 break;
30189 }
30190@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30191
30192 if ((cm_id) && (cm_id->event_handler)) {
30193 if (issue_disconn) {
30194- atomic_inc(&cm_disconnects);
30195+ atomic_inc_unchecked(&cm_disconnects);
30196 cm_event.event = IW_CM_EVENT_DISCONNECT;
30197 cm_event.status = disconn_status;
30198 cm_event.local_addr = cm_id->local_addr;
30199@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30200 }
30201
30202 if (issue_close) {
30203- atomic_inc(&cm_closes);
30204+ atomic_inc_unchecked(&cm_closes);
30205 nes_disconnect(nesqp, 1);
30206
30207 cm_id->provider_data = nesqp;
30208@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30209
30210 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30211 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30212- atomic_inc(&cm_accepts);
30213+ atomic_inc_unchecked(&cm_accepts);
30214
30215 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30216 atomic_read(&nesvnic->netdev->refcnt));
30217@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30218
30219 struct nes_cm_core *cm_core;
30220
30221- atomic_inc(&cm_rejects);
30222+ atomic_inc_unchecked(&cm_rejects);
30223 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30224 loopback = cm_node->loopbackpartner;
30225 cm_core = cm_node->cm_core;
30226@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30227 ntohl(cm_id->local_addr.sin_addr.s_addr),
30228 ntohs(cm_id->local_addr.sin_port));
30229
30230- atomic_inc(&cm_connects);
30231+ atomic_inc_unchecked(&cm_connects);
30232 nesqp->active_conn = 1;
30233
30234 /* cache the cm_id in the qp */
30235@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30236 if (nesqp->destroyed) {
30237 return;
30238 }
30239- atomic_inc(&cm_connecteds);
30240+ atomic_inc_unchecked(&cm_connecteds);
30241 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30242 " local port 0x%04X. jiffies = %lu.\n",
30243 nesqp->hwqp.qp_id,
30244@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30245
30246 ret = cm_id->event_handler(cm_id, &cm_event);
30247 cm_id->add_ref(cm_id);
30248- atomic_inc(&cm_closes);
30249+ atomic_inc_unchecked(&cm_closes);
30250 cm_event.event = IW_CM_EVENT_CLOSE;
30251 cm_event.status = IW_CM_EVENT_STATUS_OK;
30252 cm_event.provider_data = cm_id->provider_data;
30253@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30254 return;
30255 cm_id = cm_node->cm_id;
30256
30257- atomic_inc(&cm_connect_reqs);
30258+ atomic_inc_unchecked(&cm_connect_reqs);
30259 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30260 cm_node, cm_id, jiffies);
30261
30262@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30263 return;
30264 cm_id = cm_node->cm_id;
30265
30266- atomic_inc(&cm_connect_reqs);
30267+ atomic_inc_unchecked(&cm_connect_reqs);
30268 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30269 cm_node, cm_id, jiffies);
30270
30271diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h
30272--- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30273+++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30274@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30275 extern unsigned int wqm_quanta;
30276 extern struct list_head nes_adapter_list;
30277
30278-extern atomic_t cm_connects;
30279-extern atomic_t cm_accepts;
30280-extern atomic_t cm_disconnects;
30281-extern atomic_t cm_closes;
30282-extern atomic_t cm_connecteds;
30283-extern atomic_t cm_connect_reqs;
30284-extern atomic_t cm_rejects;
30285-extern atomic_t mod_qp_timouts;
30286-extern atomic_t qps_created;
30287-extern atomic_t qps_destroyed;
30288-extern atomic_t sw_qps_destroyed;
30289+extern atomic_unchecked_t cm_connects;
30290+extern atomic_unchecked_t cm_accepts;
30291+extern atomic_unchecked_t cm_disconnects;
30292+extern atomic_unchecked_t cm_closes;
30293+extern atomic_unchecked_t cm_connecteds;
30294+extern atomic_unchecked_t cm_connect_reqs;
30295+extern atomic_unchecked_t cm_rejects;
30296+extern atomic_unchecked_t mod_qp_timouts;
30297+extern atomic_unchecked_t qps_created;
30298+extern atomic_unchecked_t qps_destroyed;
30299+extern atomic_unchecked_t sw_qps_destroyed;
30300 extern u32 mh_detected;
30301 extern u32 mh_pauses_sent;
30302 extern u32 cm_packets_sent;
30303@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30304 extern u32 cm_listens_created;
30305 extern u32 cm_listens_destroyed;
30306 extern u32 cm_backlog_drops;
30307-extern atomic_t cm_loopbacks;
30308-extern atomic_t cm_nodes_created;
30309-extern atomic_t cm_nodes_destroyed;
30310-extern atomic_t cm_accel_dropped_pkts;
30311-extern atomic_t cm_resets_recvd;
30312+extern atomic_unchecked_t cm_loopbacks;
30313+extern atomic_unchecked_t cm_nodes_created;
30314+extern atomic_unchecked_t cm_nodes_destroyed;
30315+extern atomic_unchecked_t cm_accel_dropped_pkts;
30316+extern atomic_unchecked_t cm_resets_recvd;
30317
30318 extern u32 int_mod_timer_init;
30319 extern u32 int_mod_cq_depth_256;
30320diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c
30321--- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30322+++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30323@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30324 target_stat_values[++index] = mh_detected;
30325 target_stat_values[++index] = mh_pauses_sent;
30326 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30327- target_stat_values[++index] = atomic_read(&cm_connects);
30328- target_stat_values[++index] = atomic_read(&cm_accepts);
30329- target_stat_values[++index] = atomic_read(&cm_disconnects);
30330- target_stat_values[++index] = atomic_read(&cm_connecteds);
30331- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30332- target_stat_values[++index] = atomic_read(&cm_rejects);
30333- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30334- target_stat_values[++index] = atomic_read(&qps_created);
30335- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30336- target_stat_values[++index] = atomic_read(&qps_destroyed);
30337- target_stat_values[++index] = atomic_read(&cm_closes);
30338+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30339+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30340+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30341+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30342+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30343+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30344+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30345+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30346+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30347+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30348+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30349 target_stat_values[++index] = cm_packets_sent;
30350 target_stat_values[++index] = cm_packets_bounced;
30351 target_stat_values[++index] = cm_packets_created;
30352@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30353 target_stat_values[++index] = cm_listens_created;
30354 target_stat_values[++index] = cm_listens_destroyed;
30355 target_stat_values[++index] = cm_backlog_drops;
30356- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30357- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30358- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30359- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30360- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30361+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30362+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30363+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30364+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30365+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30366 target_stat_values[++index] = int_mod_timer_init;
30367 target_stat_values[++index] = int_mod_cq_depth_1;
30368 target_stat_values[++index] = int_mod_cq_depth_4;
30369diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c
30370--- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30371+++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30372@@ -45,9 +45,9 @@
30373
30374 #include <rdma/ib_umem.h>
30375
30376-atomic_t mod_qp_timouts;
30377-atomic_t qps_created;
30378-atomic_t sw_qps_destroyed;
30379+atomic_unchecked_t mod_qp_timouts;
30380+atomic_unchecked_t qps_created;
30381+atomic_unchecked_t sw_qps_destroyed;
30382
30383 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30384
30385@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30386 if (init_attr->create_flags)
30387 return ERR_PTR(-EINVAL);
30388
30389- atomic_inc(&qps_created);
30390+ atomic_inc_unchecked(&qps_created);
30391 switch (init_attr->qp_type) {
30392 case IB_QPT_RC:
30393 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30394@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30395 struct iw_cm_event cm_event;
30396 int ret;
30397
30398- atomic_inc(&sw_qps_destroyed);
30399+ atomic_inc_unchecked(&sw_qps_destroyed);
30400 nesqp->destroyed = 1;
30401
30402 /* Blow away the connection if it exists. */
30403diff -urNp linux-2.6.32.42/drivers/input/gameport/gameport.c linux-2.6.32.42/drivers/input/gameport/gameport.c
30404--- linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30405+++ linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30406@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30407 */
30408 static void gameport_init_port(struct gameport *gameport)
30409 {
30410- static atomic_t gameport_no = ATOMIC_INIT(0);
30411+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30412
30413 __module_get(THIS_MODULE);
30414
30415 mutex_init(&gameport->drv_mutex);
30416 device_initialize(&gameport->dev);
30417- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30418+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30419 gameport->dev.bus = &gameport_bus;
30420 gameport->dev.release = gameport_release_port;
30421 if (gameport->parent)
30422diff -urNp linux-2.6.32.42/drivers/input/input.c linux-2.6.32.42/drivers/input/input.c
30423--- linux-2.6.32.42/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30424+++ linux-2.6.32.42/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30425@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30426 */
30427 int input_register_device(struct input_dev *dev)
30428 {
30429- static atomic_t input_no = ATOMIC_INIT(0);
30430+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30431 struct input_handler *handler;
30432 const char *path;
30433 int error;
30434@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30435 dev->setkeycode = input_default_setkeycode;
30436
30437 dev_set_name(&dev->dev, "input%ld",
30438- (unsigned long) atomic_inc_return(&input_no) - 1);
30439+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30440
30441 error = device_add(&dev->dev);
30442 if (error)
30443diff -urNp linux-2.6.32.42/drivers/input/joystick/sidewinder.c linux-2.6.32.42/drivers/input/joystick/sidewinder.c
30444--- linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30445+++ linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30446@@ -30,6 +30,7 @@
30447 #include <linux/kernel.h>
30448 #include <linux/module.h>
30449 #include <linux/slab.h>
30450+#include <linux/sched.h>
30451 #include <linux/init.h>
30452 #include <linux/input.h>
30453 #include <linux/gameport.h>
30454@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30455 unsigned char buf[SW_LENGTH];
30456 int i;
30457
30458+ pax_track_stack();
30459+
30460 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30461
30462 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30463diff -urNp linux-2.6.32.42/drivers/input/joystick/xpad.c linux-2.6.32.42/drivers/input/joystick/xpad.c
30464--- linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30465+++ linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30466@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30467
30468 static int xpad_led_probe(struct usb_xpad *xpad)
30469 {
30470- static atomic_t led_seq = ATOMIC_INIT(0);
30471+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30472 long led_no;
30473 struct xpad_led *led;
30474 struct led_classdev *led_cdev;
30475@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
30476 if (!led)
30477 return -ENOMEM;
30478
30479- led_no = (long)atomic_inc_return(&led_seq) - 1;
30480+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30481
30482 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30483 led->xpad = xpad;
30484diff -urNp linux-2.6.32.42/drivers/input/serio/serio.c linux-2.6.32.42/drivers/input/serio/serio.c
30485--- linux-2.6.32.42/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30486+++ linux-2.6.32.42/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30487@@ -527,7 +527,7 @@ static void serio_release_port(struct de
30488 */
30489 static void serio_init_port(struct serio *serio)
30490 {
30491- static atomic_t serio_no = ATOMIC_INIT(0);
30492+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30493
30494 __module_get(THIS_MODULE);
30495
30496@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30497 mutex_init(&serio->drv_mutex);
30498 device_initialize(&serio->dev);
30499 dev_set_name(&serio->dev, "serio%ld",
30500- (long)atomic_inc_return(&serio_no) - 1);
30501+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30502 serio->dev.bus = &serio_bus;
30503 serio->dev.release = serio_release_port;
30504 if (serio->parent) {
30505diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/common.c linux-2.6.32.42/drivers/isdn/gigaset/common.c
30506--- linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
30507+++ linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
30508@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
30509 cs->commands_pending = 0;
30510 cs->cur_at_seq = 0;
30511 cs->gotfwver = -1;
30512- cs->open_count = 0;
30513+ local_set(&cs->open_count, 0);
30514 cs->dev = NULL;
30515 cs->tty = NULL;
30516 cs->tty_dev = NULL;
30517diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h
30518--- linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
30519+++ linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
30520@@ -34,6 +34,7 @@
30521 #include <linux/tty_driver.h>
30522 #include <linux/list.h>
30523 #include <asm/atomic.h>
30524+#include <asm/local.h>
30525
30526 #define GIG_VERSION {0,5,0,0}
30527 #define GIG_COMPAT {0,4,0,0}
30528@@ -446,7 +447,7 @@ struct cardstate {
30529 spinlock_t cmdlock;
30530 unsigned curlen, cmdbytes;
30531
30532- unsigned open_count;
30533+ local_t open_count;
30534 struct tty_struct *tty;
30535 struct tasklet_struct if_wake_tasklet;
30536 unsigned control_state;
30537diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/interface.c linux-2.6.32.42/drivers/isdn/gigaset/interface.c
30538--- linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
30539+++ linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
30540@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
30541 return -ERESTARTSYS; // FIXME -EINTR?
30542 tty->driver_data = cs;
30543
30544- ++cs->open_count;
30545-
30546- if (cs->open_count == 1) {
30547+ if (local_inc_return(&cs->open_count) == 1) {
30548 spin_lock_irqsave(&cs->lock, flags);
30549 cs->tty = tty;
30550 spin_unlock_irqrestore(&cs->lock, flags);
30551@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
30552
30553 if (!cs->connected)
30554 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30555- else if (!cs->open_count)
30556+ else if (!local_read(&cs->open_count))
30557 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30558 else {
30559- if (!--cs->open_count) {
30560+ if (!local_dec_return(&cs->open_count)) {
30561 spin_lock_irqsave(&cs->lock, flags);
30562 cs->tty = NULL;
30563 spin_unlock_irqrestore(&cs->lock, flags);
30564@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
30565 if (!cs->connected) {
30566 gig_dbg(DEBUG_IF, "not connected");
30567 retval = -ENODEV;
30568- } else if (!cs->open_count)
30569+ } else if (!local_read(&cs->open_count))
30570 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30571 else {
30572 retval = 0;
30573@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
30574 if (!cs->connected) {
30575 gig_dbg(DEBUG_IF, "not connected");
30576 retval = -ENODEV;
30577- } else if (!cs->open_count)
30578+ } else if (!local_read(&cs->open_count))
30579 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30580 else if (cs->mstate != MS_LOCKED) {
30581 dev_warn(cs->dev, "can't write to unlocked device\n");
30582@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
30583 if (!cs->connected) {
30584 gig_dbg(DEBUG_IF, "not connected");
30585 retval = -ENODEV;
30586- } else if (!cs->open_count)
30587+ } else if (!local_read(&cs->open_count))
30588 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30589 else if (cs->mstate != MS_LOCKED) {
30590 dev_warn(cs->dev, "can't write to unlocked device\n");
30591@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
30592
30593 if (!cs->connected)
30594 gig_dbg(DEBUG_IF, "not connected");
30595- else if (!cs->open_count)
30596+ else if (!local_read(&cs->open_count))
30597 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30598 else if (cs->mstate != MS_LOCKED)
30599 dev_warn(cs->dev, "can't write to unlocked device\n");
30600@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
30601
30602 if (!cs->connected)
30603 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30604- else if (!cs->open_count)
30605+ else if (!local_read(&cs->open_count))
30606 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30607 else {
30608 //FIXME
30609@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
30610
30611 if (!cs->connected)
30612 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30613- else if (!cs->open_count)
30614+ else if (!local_read(&cs->open_count))
30615 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30616 else {
30617 //FIXME
30618@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30619 goto out;
30620 }
30621
30622- if (!cs->open_count) {
30623+ if (!local_read(&cs->open_count)) {
30624 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30625 goto out;
30626 }
30627diff -urNp linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c
30628--- linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30629+++ linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30630@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30631 }
30632 if (left) {
30633 if (t4file->user) {
30634- if (copy_from_user(buf, dp, left))
30635+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30636 return -EFAULT;
30637 } else {
30638 memcpy(buf, dp, left);
30639@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30640 }
30641 if (left) {
30642 if (config->user) {
30643- if (copy_from_user(buf, dp, left))
30644+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30645 return -EFAULT;
30646 } else {
30647 memcpy(buf, dp, left);
30648diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c
30649--- linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30650+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30651@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30652 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30653 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30654
30655+ pax_track_stack();
30656
30657 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30658 {
30659diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c
30660--- linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30661+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30662@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30663 IDI_SYNC_REQ req;
30664 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30665
30666+ pax_track_stack();
30667+
30668 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30669
30670 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30671diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c
30672--- linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30673+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30674@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30675 IDI_SYNC_REQ req;
30676 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30677
30678+ pax_track_stack();
30679+
30680 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30681
30682 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30683diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c
30684--- linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30685+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30686@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30687 IDI_SYNC_REQ req;
30688 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30689
30690+ pax_track_stack();
30691+
30692 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30693
30694 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30695diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c
30696--- linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30697+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30698@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30699 IDI_SYNC_REQ req;
30700 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30701
30702+ pax_track_stack();
30703+
30704 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30705
30706 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30707diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c
30708--- linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30709+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30710@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30711 dword d;
30712 word w;
30713
30714+ pax_track_stack();
30715+
30716 a = plci->adapter;
30717 Id = ((word)plci->Id<<8)|a->Id;
30718 PUT_WORD(&SS_Ind[4],0x0000);
30719@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30720 word j, n, w;
30721 dword d;
30722
30723+ pax_track_stack();
30724+
30725
30726 for(i=0;i<8;i++) bp_parms[i].length = 0;
30727 for(i=0;i<2;i++) global_config[i].length = 0;
30728@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30729 const byte llc3[] = {4,3,2,2,6,6,0};
30730 const byte header[] = {0,2,3,3,0,0,0};
30731
30732+ pax_track_stack();
30733+
30734 for(i=0;i<8;i++) bp_parms[i].length = 0;
30735 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30736 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30737@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30738 word appl_number_group_type[MAX_APPL];
30739 PLCI *auxplci;
30740
30741+ pax_track_stack();
30742+
30743 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30744
30745 if(!a->group_optimization_enabled)
30746diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c
30747--- linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30748+++ linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30749@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30750 IDI_SYNC_REQ req;
30751 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30752
30753+ pax_track_stack();
30754+
30755 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30756
30757 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30758diff -urNp linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c
30759--- linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30760+++ linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30761@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30762 } iocpar;
30763 void __user *argp = (void __user *)arg;
30764
30765+ pax_track_stack();
30766+
30767 #define name iocpar.name
30768 #define bname iocpar.bname
30769 #define iocts iocpar.iocts
30770diff -urNp linux-2.6.32.42/drivers/isdn/icn/icn.c linux-2.6.32.42/drivers/isdn/icn/icn.c
30771--- linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30772+++ linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30773@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30774 if (count > len)
30775 count = len;
30776 if (user) {
30777- if (copy_from_user(msg, buf, count))
30778+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30779 return -EFAULT;
30780 } else
30781 memcpy(msg, buf, count);
30782diff -urNp linux-2.6.32.42/drivers/isdn/mISDN/socket.c linux-2.6.32.42/drivers/isdn/mISDN/socket.c
30783--- linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30784+++ linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30785@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30786 if (dev) {
30787 struct mISDN_devinfo di;
30788
30789+ memset(&di, 0, sizeof(di));
30790 di.id = dev->id;
30791 di.Dprotocols = dev->Dprotocols;
30792 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30793@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30794 if (dev) {
30795 struct mISDN_devinfo di;
30796
30797+ memset(&di, 0, sizeof(di));
30798 di.id = dev->id;
30799 di.Dprotocols = dev->Dprotocols;
30800 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30801diff -urNp linux-2.6.32.42/drivers/isdn/sc/interrupt.c linux-2.6.32.42/drivers/isdn/sc/interrupt.c
30802--- linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30803+++ linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30804@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30805 }
30806 else if(callid>=0x0000 && callid<=0x7FFF)
30807 {
30808+ int len;
30809+
30810 pr_debug("%s: Got Incoming Call\n",
30811 sc_adapter[card]->devicename);
30812- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30813- strcpy(setup.eazmsn,
30814- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30815+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30816+ sizeof(setup.phone));
30817+ if (len >= sizeof(setup.phone))
30818+ continue;
30819+ len = strlcpy(setup.eazmsn,
30820+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30821+ sizeof(setup.eazmsn));
30822+ if (len >= sizeof(setup.eazmsn))
30823+ continue;
30824 setup.si1 = 7;
30825 setup.si2 = 0;
30826 setup.plan = 0;
30827@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30828 * Handle a GetMyNumber Rsp
30829 */
30830 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30831- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30832+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30833+ rcvmsg.msg_data.byte_array,
30834+ sizeof(rcvmsg.msg_data.byte_array));
30835 continue;
30836 }
30837
30838diff -urNp linux-2.6.32.42/drivers/lguest/core.c linux-2.6.32.42/drivers/lguest/core.c
30839--- linux-2.6.32.42/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30840+++ linux-2.6.32.42/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30841@@ -91,9 +91,17 @@ static __init int map_switcher(void)
30842 * it's worked so far. The end address needs +1 because __get_vm_area
30843 * allocates an extra guard page, so we need space for that.
30844 */
30845+
30846+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30847+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30848+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30849+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30850+#else
30851 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30852 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30853 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30854+#endif
30855+
30856 if (!switcher_vma) {
30857 err = -ENOMEM;
30858 printk("lguest: could not map switcher pages high\n");
30859@@ -118,7 +126,7 @@ static __init int map_switcher(void)
30860 * Now the Switcher is mapped at the right address, we can't fail!
30861 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30862 */
30863- memcpy(switcher_vma->addr, start_switcher_text,
30864+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30865 end_switcher_text - start_switcher_text);
30866
30867 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30868diff -urNp linux-2.6.32.42/drivers/lguest/x86/core.c linux-2.6.32.42/drivers/lguest/x86/core.c
30869--- linux-2.6.32.42/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30870+++ linux-2.6.32.42/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30871@@ -59,7 +59,7 @@ static struct {
30872 /* Offset from where switcher.S was compiled to where we've copied it */
30873 static unsigned long switcher_offset(void)
30874 {
30875- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30876+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30877 }
30878
30879 /* This cpu's struct lguest_pages. */
30880@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30881 * These copies are pretty cheap, so we do them unconditionally: */
30882 /* Save the current Host top-level page directory.
30883 */
30884+
30885+#ifdef CONFIG_PAX_PER_CPU_PGD
30886+ pages->state.host_cr3 = read_cr3();
30887+#else
30888 pages->state.host_cr3 = __pa(current->mm->pgd);
30889+#endif
30890+
30891 /*
30892 * Set up the Guest's page tables to see this CPU's pages (and no
30893 * other CPU's pages).
30894@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30895 * compiled-in switcher code and the high-mapped copy we just made.
30896 */
30897 for (i = 0; i < IDT_ENTRIES; i++)
30898- default_idt_entries[i] += switcher_offset();
30899+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30900
30901 /*
30902 * Set up the Switcher's per-cpu areas.
30903@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30904 * it will be undisturbed when we switch. To change %cs and jump we
30905 * need this structure to feed to Intel's "lcall" instruction.
30906 */
30907- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30908+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30909 lguest_entry.segment = LGUEST_CS;
30910
30911 /*
30912diff -urNp linux-2.6.32.42/drivers/lguest/x86/switcher_32.S linux-2.6.32.42/drivers/lguest/x86/switcher_32.S
30913--- linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30914+++ linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30915@@ -87,6 +87,7 @@
30916 #include <asm/page.h>
30917 #include <asm/segment.h>
30918 #include <asm/lguest.h>
30919+#include <asm/processor-flags.h>
30920
30921 // We mark the start of the code to copy
30922 // It's placed in .text tho it's never run here
30923@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30924 // Changes type when we load it: damn Intel!
30925 // For after we switch over our page tables
30926 // That entry will be read-only: we'd crash.
30927+
30928+#ifdef CONFIG_PAX_KERNEXEC
30929+ mov %cr0, %edx
30930+ xor $X86_CR0_WP, %edx
30931+ mov %edx, %cr0
30932+#endif
30933+
30934 movl $(GDT_ENTRY_TSS*8), %edx
30935 ltr %dx
30936
30937@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30938 // Let's clear it again for our return.
30939 // The GDT descriptor of the Host
30940 // Points to the table after two "size" bytes
30941- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30942+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30943 // Clear "used" from type field (byte 5, bit 2)
30944- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30945+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30946+
30947+#ifdef CONFIG_PAX_KERNEXEC
30948+ mov %cr0, %eax
30949+ xor $X86_CR0_WP, %eax
30950+ mov %eax, %cr0
30951+#endif
30952
30953 // Once our page table's switched, the Guest is live!
30954 // The Host fades as we run this final step.
30955@@ -295,13 +309,12 @@ deliver_to_host:
30956 // I consulted gcc, and it gave
30957 // These instructions, which I gladly credit:
30958 leal (%edx,%ebx,8), %eax
30959- movzwl (%eax),%edx
30960- movl 4(%eax), %eax
30961- xorw %ax, %ax
30962- orl %eax, %edx
30963+ movl 4(%eax), %edx
30964+ movw (%eax), %dx
30965 // Now the address of the handler's in %edx
30966 // We call it now: its "iret" drops us home.
30967- jmp *%edx
30968+ ljmp $__KERNEL_CS, $1f
30969+1: jmp *%edx
30970
30971 // Every interrupt can come to us here
30972 // But we must truly tell each apart.
30973diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c
30974--- linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30975+++ linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30976@@ -15,7 +15,7 @@
30977
30978 #define MAX_PMU_LEVEL 0xFF
30979
30980-static struct backlight_ops pmu_backlight_data;
30981+static const struct backlight_ops pmu_backlight_data;
30982 static DEFINE_SPINLOCK(pmu_backlight_lock);
30983 static int sleeping, uses_pmu_bl;
30984 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30985@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30986 return bd->props.brightness;
30987 }
30988
30989-static struct backlight_ops pmu_backlight_data = {
30990+static const struct backlight_ops pmu_backlight_data = {
30991 .get_brightness = pmu_backlight_get_brightness,
30992 .update_status = pmu_backlight_update_status,
30993
30994diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu.c linux-2.6.32.42/drivers/macintosh/via-pmu.c
30995--- linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30996+++ linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30997@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30998 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30999 }
31000
31001-static struct platform_suspend_ops pmu_pm_ops = {
31002+static const struct platform_suspend_ops pmu_pm_ops = {
31003 .enter = powerbook_sleep,
31004 .valid = pmu_sleep_valid,
31005 };
31006diff -urNp linux-2.6.32.42/drivers/md/dm.c linux-2.6.32.42/drivers/md/dm.c
31007--- linux-2.6.32.42/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
31008+++ linux-2.6.32.42/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
31009@@ -163,9 +163,9 @@ struct mapped_device {
31010 /*
31011 * Event handling.
31012 */
31013- atomic_t event_nr;
31014+ atomic_unchecked_t event_nr;
31015 wait_queue_head_t eventq;
31016- atomic_t uevent_seq;
31017+ atomic_unchecked_t uevent_seq;
31018 struct list_head uevent_list;
31019 spinlock_t uevent_lock; /* Protect access to uevent_list */
31020
31021@@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
31022 rwlock_init(&md->map_lock);
31023 atomic_set(&md->holders, 1);
31024 atomic_set(&md->open_count, 0);
31025- atomic_set(&md->event_nr, 0);
31026- atomic_set(&md->uevent_seq, 0);
31027+ atomic_set_unchecked(&md->event_nr, 0);
31028+ atomic_set_unchecked(&md->uevent_seq, 0);
31029 INIT_LIST_HEAD(&md->uevent_list);
31030 spin_lock_init(&md->uevent_lock);
31031
31032@@ -1921,7 +1921,7 @@ static void event_callback(void *context
31033
31034 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31035
31036- atomic_inc(&md->event_nr);
31037+ atomic_inc_unchecked(&md->event_nr);
31038 wake_up(&md->eventq);
31039 }
31040
31041@@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
31042
31043 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31044 {
31045- return atomic_add_return(1, &md->uevent_seq);
31046+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31047 }
31048
31049 uint32_t dm_get_event_nr(struct mapped_device *md)
31050 {
31051- return atomic_read(&md->event_nr);
31052+ return atomic_read_unchecked(&md->event_nr);
31053 }
31054
31055 int dm_wait_event(struct mapped_device *md, int event_nr)
31056 {
31057 return wait_event_interruptible(md->eventq,
31058- (event_nr != atomic_read(&md->event_nr)));
31059+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31060 }
31061
31062 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31063diff -urNp linux-2.6.32.42/drivers/md/dm-ioctl.c linux-2.6.32.42/drivers/md/dm-ioctl.c
31064--- linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31065+++ linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31066@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31067 cmd == DM_LIST_VERSIONS_CMD)
31068 return 0;
31069
31070- if ((cmd == DM_DEV_CREATE_CMD)) {
31071+ if (cmd == DM_DEV_CREATE_CMD) {
31072 if (!*param->name) {
31073 DMWARN("name not supplied when creating device");
31074 return -EINVAL;
31075diff -urNp linux-2.6.32.42/drivers/md/dm-raid1.c linux-2.6.32.42/drivers/md/dm-raid1.c
31076--- linux-2.6.32.42/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31077+++ linux-2.6.32.42/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31078@@ -41,7 +41,7 @@ enum dm_raid1_error {
31079
31080 struct mirror {
31081 struct mirror_set *ms;
31082- atomic_t error_count;
31083+ atomic_unchecked_t error_count;
31084 unsigned long error_type;
31085 struct dm_dev *dev;
31086 sector_t offset;
31087@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31088 * simple way to tell if a device has encountered
31089 * errors.
31090 */
31091- atomic_inc(&m->error_count);
31092+ atomic_inc_unchecked(&m->error_count);
31093
31094 if (test_and_set_bit(error_type, &m->error_type))
31095 return;
31096@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31097 }
31098
31099 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31100- if (!atomic_read(&new->error_count)) {
31101+ if (!atomic_read_unchecked(&new->error_count)) {
31102 set_default_mirror(new);
31103 break;
31104 }
31105@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31106 struct mirror *m = get_default_mirror(ms);
31107
31108 do {
31109- if (likely(!atomic_read(&m->error_count)))
31110+ if (likely(!atomic_read_unchecked(&m->error_count)))
31111 return m;
31112
31113 if (m-- == ms->mirror)
31114@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31115 {
31116 struct mirror *default_mirror = get_default_mirror(m->ms);
31117
31118- return !atomic_read(&default_mirror->error_count);
31119+ return !atomic_read_unchecked(&default_mirror->error_count);
31120 }
31121
31122 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31123@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31124 */
31125 if (likely(region_in_sync(ms, region, 1)))
31126 m = choose_mirror(ms, bio->bi_sector);
31127- else if (m && atomic_read(&m->error_count))
31128+ else if (m && atomic_read_unchecked(&m->error_count))
31129 m = NULL;
31130
31131 if (likely(m))
31132@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31133 }
31134
31135 ms->mirror[mirror].ms = ms;
31136- atomic_set(&(ms->mirror[mirror].error_count), 0);
31137+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31138 ms->mirror[mirror].error_type = 0;
31139 ms->mirror[mirror].offset = offset;
31140
31141@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31142 */
31143 static char device_status_char(struct mirror *m)
31144 {
31145- if (!atomic_read(&(m->error_count)))
31146+ if (!atomic_read_unchecked(&(m->error_count)))
31147 return 'A';
31148
31149 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31150diff -urNp linux-2.6.32.42/drivers/md/dm-stripe.c linux-2.6.32.42/drivers/md/dm-stripe.c
31151--- linux-2.6.32.42/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31152+++ linux-2.6.32.42/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31153@@ -20,7 +20,7 @@ struct stripe {
31154 struct dm_dev *dev;
31155 sector_t physical_start;
31156
31157- atomic_t error_count;
31158+ atomic_unchecked_t error_count;
31159 };
31160
31161 struct stripe_c {
31162@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31163 kfree(sc);
31164 return r;
31165 }
31166- atomic_set(&(sc->stripe[i].error_count), 0);
31167+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31168 }
31169
31170 ti->private = sc;
31171@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31172 DMEMIT("%d ", sc->stripes);
31173 for (i = 0; i < sc->stripes; i++) {
31174 DMEMIT("%s ", sc->stripe[i].dev->name);
31175- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31176+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31177 'D' : 'A';
31178 }
31179 buffer[i] = '\0';
31180@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31181 */
31182 for (i = 0; i < sc->stripes; i++)
31183 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31184- atomic_inc(&(sc->stripe[i].error_count));
31185- if (atomic_read(&(sc->stripe[i].error_count)) <
31186+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31187+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31188 DM_IO_ERROR_THRESHOLD)
31189 queue_work(kstriped, &sc->kstriped_ws);
31190 }
31191diff -urNp linux-2.6.32.42/drivers/md/dm-sysfs.c linux-2.6.32.42/drivers/md/dm-sysfs.c
31192--- linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31193+++ linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31194@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31195 NULL,
31196 };
31197
31198-static struct sysfs_ops dm_sysfs_ops = {
31199+static const struct sysfs_ops dm_sysfs_ops = {
31200 .show = dm_attr_show,
31201 };
31202
31203diff -urNp linux-2.6.32.42/drivers/md/dm-table.c linux-2.6.32.42/drivers/md/dm-table.c
31204--- linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31205+++ linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31206@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31207 if (!dev_size)
31208 return 0;
31209
31210- if ((start >= dev_size) || (start + len > dev_size)) {
31211+ if ((start >= dev_size) || (len > dev_size - start)) {
31212 DMWARN("%s: %s too small for target: "
31213 "start=%llu, len=%llu, dev_size=%llu",
31214 dm_device_name(ti->table->md), bdevname(bdev, b),
31215diff -urNp linux-2.6.32.42/drivers/md/md.c linux-2.6.32.42/drivers/md/md.c
31216--- linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:55:34.000000000 -0400
31217+++ linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:56:37.000000000 -0400
31218@@ -153,10 +153,10 @@ static int start_readonly;
31219 * start build, activate spare
31220 */
31221 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31222-static atomic_t md_event_count;
31223+static atomic_unchecked_t md_event_count;
31224 void md_new_event(mddev_t *mddev)
31225 {
31226- atomic_inc(&md_event_count);
31227+ atomic_inc_unchecked(&md_event_count);
31228 wake_up(&md_event_waiters);
31229 }
31230 EXPORT_SYMBOL_GPL(md_new_event);
31231@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31232 */
31233 static void md_new_event_inintr(mddev_t *mddev)
31234 {
31235- atomic_inc(&md_event_count);
31236+ atomic_inc_unchecked(&md_event_count);
31237 wake_up(&md_event_waiters);
31238 }
31239
31240@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31241
31242 rdev->preferred_minor = 0xffff;
31243 rdev->data_offset = le64_to_cpu(sb->data_offset);
31244- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31245+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31246
31247 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31248 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31249@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31250 else
31251 sb->resync_offset = cpu_to_le64(0);
31252
31253- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31254+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31255
31256 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31257 sb->size = cpu_to_le64(mddev->dev_sectors);
31258@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31259 static ssize_t
31260 errors_show(mdk_rdev_t *rdev, char *page)
31261 {
31262- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31263+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31264 }
31265
31266 static ssize_t
31267@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31268 char *e;
31269 unsigned long n = simple_strtoul(buf, &e, 10);
31270 if (*buf && (*e == 0 || *e == '\n')) {
31271- atomic_set(&rdev->corrected_errors, n);
31272+ atomic_set_unchecked(&rdev->corrected_errors, n);
31273 return len;
31274 }
31275 return -EINVAL;
31276@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31277 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31278 kfree(rdev);
31279 }
31280-static struct sysfs_ops rdev_sysfs_ops = {
31281+static const struct sysfs_ops rdev_sysfs_ops = {
31282 .show = rdev_attr_show,
31283 .store = rdev_attr_store,
31284 };
31285@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31286 rdev->data_offset = 0;
31287 rdev->sb_events = 0;
31288 atomic_set(&rdev->nr_pending, 0);
31289- atomic_set(&rdev->read_errors, 0);
31290- atomic_set(&rdev->corrected_errors, 0);
31291+ atomic_set_unchecked(&rdev->read_errors, 0);
31292+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31293
31294 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31295 if (!size) {
31296@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31297 kfree(mddev);
31298 }
31299
31300-static struct sysfs_ops md_sysfs_ops = {
31301+static const struct sysfs_ops md_sysfs_ops = {
31302 .show = md_attr_show,
31303 .store = md_attr_store,
31304 };
31305@@ -4474,7 +4474,8 @@ out:
31306 err = 0;
31307 blk_integrity_unregister(disk);
31308 md_new_event(mddev);
31309- sysfs_notify_dirent(mddev->sysfs_state);
31310+ if (mddev->sysfs_state)
31311+ sysfs_notify_dirent(mddev->sysfs_state);
31312 return err;
31313 }
31314
31315@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31316
31317 spin_unlock(&pers_lock);
31318 seq_printf(seq, "\n");
31319- mi->event = atomic_read(&md_event_count);
31320+ mi->event = atomic_read_unchecked(&md_event_count);
31321 return 0;
31322 }
31323 if (v == (void*)2) {
31324@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31325 chunk_kb ? "KB" : "B");
31326 if (bitmap->file) {
31327 seq_printf(seq, ", file: ");
31328- seq_path(seq, &bitmap->file->f_path, " \t\n");
31329+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31330 }
31331
31332 seq_printf(seq, "\n");
31333@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31334 else {
31335 struct seq_file *p = file->private_data;
31336 p->private = mi;
31337- mi->event = atomic_read(&md_event_count);
31338+ mi->event = atomic_read_unchecked(&md_event_count);
31339 }
31340 return error;
31341 }
31342@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31343 /* always allow read */
31344 mask = POLLIN | POLLRDNORM;
31345
31346- if (mi->event != atomic_read(&md_event_count))
31347+ if (mi->event != atomic_read_unchecked(&md_event_count))
31348 mask |= POLLERR | POLLPRI;
31349 return mask;
31350 }
31351@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31352 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31353 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31354 (int)part_stat_read(&disk->part0, sectors[1]) -
31355- atomic_read(&disk->sync_io);
31356+ atomic_read_unchecked(&disk->sync_io);
31357 /* sync IO will cause sync_io to increase before the disk_stats
31358 * as sync_io is counted when a request starts, and
31359 * disk_stats is counted when it completes.
31360diff -urNp linux-2.6.32.42/drivers/md/md.h linux-2.6.32.42/drivers/md/md.h
31361--- linux-2.6.32.42/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31362+++ linux-2.6.32.42/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31363@@ -94,10 +94,10 @@ struct mdk_rdev_s
31364 * only maintained for arrays that
31365 * support hot removal
31366 */
31367- atomic_t read_errors; /* number of consecutive read errors that
31368+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31369 * we have tried to ignore.
31370 */
31371- atomic_t corrected_errors; /* number of corrected read errors,
31372+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31373 * for reporting to userspace and storing
31374 * in superblock.
31375 */
31376@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31377
31378 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31379 {
31380- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31381+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31382 }
31383
31384 struct mdk_personality
31385diff -urNp linux-2.6.32.42/drivers/md/raid10.c linux-2.6.32.42/drivers/md/raid10.c
31386--- linux-2.6.32.42/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31387+++ linux-2.6.32.42/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31388@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31389 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31390 set_bit(R10BIO_Uptodate, &r10_bio->state);
31391 else {
31392- atomic_add(r10_bio->sectors,
31393+ atomic_add_unchecked(r10_bio->sectors,
31394 &conf->mirrors[d].rdev->corrected_errors);
31395 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31396 md_error(r10_bio->mddev,
31397@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31398 test_bit(In_sync, &rdev->flags)) {
31399 atomic_inc(&rdev->nr_pending);
31400 rcu_read_unlock();
31401- atomic_add(s, &rdev->corrected_errors);
31402+ atomic_add_unchecked(s, &rdev->corrected_errors);
31403 if (sync_page_io(rdev->bdev,
31404 r10_bio->devs[sl].addr +
31405 sect + rdev->data_offset,
31406diff -urNp linux-2.6.32.42/drivers/md/raid1.c linux-2.6.32.42/drivers/md/raid1.c
31407--- linux-2.6.32.42/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31408+++ linux-2.6.32.42/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31409@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31410 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31411 continue;
31412 rdev = conf->mirrors[d].rdev;
31413- atomic_add(s, &rdev->corrected_errors);
31414+ atomic_add_unchecked(s, &rdev->corrected_errors);
31415 if (sync_page_io(rdev->bdev,
31416 sect + rdev->data_offset,
31417 s<<9,
31418@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31419 /* Well, this device is dead */
31420 md_error(mddev, rdev);
31421 else {
31422- atomic_add(s, &rdev->corrected_errors);
31423+ atomic_add_unchecked(s, &rdev->corrected_errors);
31424 printk(KERN_INFO
31425 "raid1:%s: read error corrected "
31426 "(%d sectors at %llu on %s)\n",
31427diff -urNp linux-2.6.32.42/drivers/md/raid5.c linux-2.6.32.42/drivers/md/raid5.c
31428--- linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31429+++ linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31430@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31431 bi->bi_next = NULL;
31432 if ((rw & WRITE) &&
31433 test_bit(R5_ReWrite, &sh->dev[i].flags))
31434- atomic_add(STRIPE_SECTORS,
31435+ atomic_add_unchecked(STRIPE_SECTORS,
31436 &rdev->corrected_errors);
31437 generic_make_request(bi);
31438 } else {
31439@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31440 clear_bit(R5_ReadError, &sh->dev[i].flags);
31441 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31442 }
31443- if (atomic_read(&conf->disks[i].rdev->read_errors))
31444- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31445+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31446+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31447 } else {
31448 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31449 int retry = 0;
31450 rdev = conf->disks[i].rdev;
31451
31452 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31453- atomic_inc(&rdev->read_errors);
31454+ atomic_inc_unchecked(&rdev->read_errors);
31455 if (conf->mddev->degraded >= conf->max_degraded)
31456 printk_rl(KERN_WARNING
31457 "raid5:%s: read error not correctable "
31458@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
31459 (unsigned long long)(sh->sector
31460 + rdev->data_offset),
31461 bdn);
31462- else if (atomic_read(&rdev->read_errors)
31463+ else if (atomic_read_unchecked(&rdev->read_errors)
31464 > conf->max_nr_stripes)
31465 printk(KERN_WARNING
31466 "raid5:%s: Too many read errors, failing device %s.\n",
31467@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
31468 sector_t r_sector;
31469 struct stripe_head sh2;
31470
31471+ pax_track_stack();
31472
31473 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31474 stripe = new_sector;
31475diff -urNp linux-2.6.32.42/drivers/media/common/saa7146_hlp.c linux-2.6.32.42/drivers/media/common/saa7146_hlp.c
31476--- linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
31477+++ linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
31478@@ -353,6 +353,8 @@ static void calculate_clipping_registers
31479
31480 int x[32], y[32], w[32], h[32];
31481
31482+ pax_track_stack();
31483+
31484 /* clear out memory */
31485 memset(&line_list[0], 0x00, sizeof(u32)*32);
31486 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31487diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31488--- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
31489+++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
31490@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
31491 u8 buf[HOST_LINK_BUF_SIZE];
31492 int i;
31493
31494+ pax_track_stack();
31495+
31496 dprintk("%s\n", __func__);
31497
31498 /* check if we have space for a link buf in the rx_buffer */
31499@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
31500 unsigned long timeout;
31501 int written;
31502
31503+ pax_track_stack();
31504+
31505 dprintk("%s\n", __func__);
31506
31507 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31508diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c
31509--- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
31510+++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
31511@@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
31512 const struct dvb_device *template, void *priv, int type)
31513 {
31514 struct dvb_device *dvbdev;
31515+ /* cannot be const */
31516 struct file_operations *dvbdevfops;
31517 struct device *clsdev;
31518 int minor;
31519diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c
31520--- linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
31521+++ linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
31522@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
31523
31524 u8 buf[260];
31525
31526+ pax_track_stack();
31527+
31528 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31529 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
31530
31531diff -urNp linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c
31532--- linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
31533+++ linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
31534@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
31535 u8 tudata[585];
31536 int i;
31537
31538+ pax_track_stack();
31539+
31540 dprintk("Firmware is %zd bytes\n",fw->size);
31541
31542 /* Get eprom data */
31543diff -urNp linux-2.6.32.42/drivers/media/radio/radio-cadet.c linux-2.6.32.42/drivers/media/radio/radio-cadet.c
31544--- linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
31545+++ linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
31546@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
31547 while (i < count && dev->rdsin != dev->rdsout)
31548 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
31549
31550- if (copy_to_user(data, readbuf, i))
31551+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
31552 return -EFAULT;
31553 return i;
31554 }
31555diff -urNp linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c
31556--- linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
31557+++ linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
31558@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
31559
31560 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
31561
31562-static atomic_t cx18_instance = ATOMIC_INIT(0);
31563+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
31564
31565 /* Parameter declarations */
31566 static int cardtype[CX18_MAX_CARDS];
31567@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
31568 struct i2c_client c;
31569 u8 eedata[256];
31570
31571+ pax_track_stack();
31572+
31573 memset(&c, 0, sizeof(c));
31574 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31575 c.adapter = &cx->i2c_adap[0];
31576@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
31577 struct cx18 *cx;
31578
31579 /* FIXME - module parameter arrays constrain max instances */
31580- i = atomic_inc_return(&cx18_instance) - 1;
31581+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
31582 if (i >= CX18_MAX_CARDS) {
31583 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
31584 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
31585diff -urNp linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c
31586--- linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
31587+++ linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
31588@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
31589 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
31590
31591 /* ivtv instance counter */
31592-static atomic_t ivtv_instance = ATOMIC_INIT(0);
31593+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
31594
31595 /* Parameter declarations */
31596 static int cardtype[IVTV_MAX_CARDS];
31597diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.c linux-2.6.32.42/drivers/media/video/omap24xxcam.c
31598--- linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
31599+++ linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
31600@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
31601 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
31602
31603 do_gettimeofday(&vb->ts);
31604- vb->field_count = atomic_add_return(2, &fh->field_count);
31605+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
31606 if (csr & csr_error) {
31607 vb->state = VIDEOBUF_ERROR;
31608 if (!atomic_read(&fh->cam->in_reset)) {
31609diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.h linux-2.6.32.42/drivers/media/video/omap24xxcam.h
31610--- linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
31611+++ linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
31612@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
31613 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
31614 struct videobuf_queue vbq;
31615 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
31616- atomic_t field_count; /* field counter for videobuf_buffer */
31617+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31618 /* accessing cam here doesn't need serialisation: it's constant */
31619 struct omap24xxcam_device *cam;
31620 };
31621diff -urNp linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31622--- linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31623+++ linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31624@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31625 u8 *eeprom;
31626 struct tveeprom tvdata;
31627
31628+ pax_track_stack();
31629+
31630 memset(&tvdata,0,sizeof(tvdata));
31631
31632 eeprom = pvr2_eeprom_fetch(hdw);
31633diff -urNp linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c
31634--- linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31635+++ linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31636@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31637 unsigned char localPAT[256];
31638 unsigned char localPMT[256];
31639
31640+ pax_track_stack();
31641+
31642 /* Set video format - must be done first as it resets other settings */
31643 set_reg8(client, 0x41, h->video_format);
31644
31645diff -urNp linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c
31646--- linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31647+++ linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31648@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31649 wait_queue_head_t *q = 0;
31650 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31651
31652+ pax_track_stack();
31653+
31654 /* While any outstand message on the bus exists... */
31655 do {
31656
31657@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31658 u8 tmp[512];
31659 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31660
31661+ pax_track_stack();
31662+
31663 while (loop) {
31664
31665 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31666diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c
31667--- linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31668+++ linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31669@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31670 int error;
31671
31672 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31673- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31674+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31675
31676 cam->input = input_dev = input_allocate_device();
31677 if (!input_dev) {
31678diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c
31679--- linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31680+++ linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31681@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31682 int error;
31683
31684 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31685- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31686+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31687
31688 cam->input = input_dev = input_allocate_device();
31689 if (!input_dev) {
31690diff -urNp linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c
31691--- linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31692+++ linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31693@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31694 unsigned char rv, gv, bv;
31695 static unsigned char *Y, *U, *V;
31696
31697+ pax_track_stack();
31698+
31699 frame = usbvision->curFrame;
31700 imageSize = frame->frmwidth * frame->frmheight;
31701 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31702diff -urNp linux-2.6.32.42/drivers/media/video/v4l2-device.c linux-2.6.32.42/drivers/media/video/v4l2-device.c
31703--- linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31704+++ linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31705@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31706 EXPORT_SYMBOL_GPL(v4l2_device_register);
31707
31708 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31709- atomic_t *instance)
31710+ atomic_unchecked_t *instance)
31711 {
31712- int num = atomic_inc_return(instance) - 1;
31713+ int num = atomic_inc_return_unchecked(instance) - 1;
31714 int len = strlen(basename);
31715
31716 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31717diff -urNp linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c
31718--- linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31719+++ linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31720@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31721 {
31722 struct videobuf_queue q;
31723
31724+ pax_track_stack();
31725+
31726 /* Required to make generic handler to call __videobuf_alloc */
31727 q.int_ops = &sg_ops;
31728
31729diff -urNp linux-2.6.32.42/drivers/message/fusion/mptbase.c linux-2.6.32.42/drivers/message/fusion/mptbase.c
31730--- linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31731+++ linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31732@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31733 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31734 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31735
31736+#ifdef CONFIG_GRKERNSEC_HIDESYM
31737+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31738+ NULL, NULL);
31739+#else
31740 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31741 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31742+#endif
31743+
31744 /*
31745 * Rounding UP to nearest 4-kB boundary here...
31746 */
31747diff -urNp linux-2.6.32.42/drivers/message/fusion/mptsas.c linux-2.6.32.42/drivers/message/fusion/mptsas.c
31748--- linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31749+++ linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31750@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31751 return 0;
31752 }
31753
31754+static inline void
31755+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31756+{
31757+ if (phy_info->port_details) {
31758+ phy_info->port_details->rphy = rphy;
31759+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31760+ ioc->name, rphy));
31761+ }
31762+
31763+ if (rphy) {
31764+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31765+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31766+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31767+ ioc->name, rphy, rphy->dev.release));
31768+ }
31769+}
31770+
31771 /* no mutex */
31772 static void
31773 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31774@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31775 return NULL;
31776 }
31777
31778-static inline void
31779-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31780-{
31781- if (phy_info->port_details) {
31782- phy_info->port_details->rphy = rphy;
31783- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31784- ioc->name, rphy));
31785- }
31786-
31787- if (rphy) {
31788- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31789- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31790- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31791- ioc->name, rphy, rphy->dev.release));
31792- }
31793-}
31794-
31795 static inline struct sas_port *
31796 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31797 {
31798diff -urNp linux-2.6.32.42/drivers/message/fusion/mptscsih.c linux-2.6.32.42/drivers/message/fusion/mptscsih.c
31799--- linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31800+++ linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31801@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31802
31803 h = shost_priv(SChost);
31804
31805- if (h) {
31806- if (h->info_kbuf == NULL)
31807- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31808- return h->info_kbuf;
31809- h->info_kbuf[0] = '\0';
31810+ if (!h)
31811+ return NULL;
31812
31813- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31814- h->info_kbuf[size-1] = '\0';
31815- }
31816+ if (h->info_kbuf == NULL)
31817+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31818+ return h->info_kbuf;
31819+ h->info_kbuf[0] = '\0';
31820+
31821+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31822+ h->info_kbuf[size-1] = '\0';
31823
31824 return h->info_kbuf;
31825 }
31826diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_config.c linux-2.6.32.42/drivers/message/i2o/i2o_config.c
31827--- linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31828+++ linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31829@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31830 struct i2o_message *msg;
31831 unsigned int iop;
31832
31833+ pax_track_stack();
31834+
31835 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31836 return -EFAULT;
31837
31838diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_proc.c linux-2.6.32.42/drivers/message/i2o/i2o_proc.c
31839--- linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31840+++ linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31841@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31842 "Array Controller Device"
31843 };
31844
31845-static char *chtostr(u8 * chars, int n)
31846-{
31847- char tmp[256];
31848- tmp[0] = 0;
31849- return strncat(tmp, (char *)chars, n);
31850-}
31851-
31852 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31853 char *group)
31854 {
31855@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31856
31857 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31858 seq_printf(seq, "%-#8x", ddm_table.module_id);
31859- seq_printf(seq, "%-29s",
31860- chtostr(ddm_table.module_name_version, 28));
31861+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31862 seq_printf(seq, "%9d ", ddm_table.data_size);
31863 seq_printf(seq, "%8d", ddm_table.code_size);
31864
31865@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31866
31867 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31868 seq_printf(seq, "%-#8x", dst->module_id);
31869- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31870- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31871+ seq_printf(seq, "%-.28s", dst->module_name_version);
31872+ seq_printf(seq, "%-.8s", dst->date);
31873 seq_printf(seq, "%8d ", dst->module_size);
31874 seq_printf(seq, "%8d ", dst->mpb_size);
31875 seq_printf(seq, "0x%04x", dst->module_flags);
31876@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31877 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31878 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31879 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31880- seq_printf(seq, "Vendor info : %s\n",
31881- chtostr((u8 *) (work32 + 2), 16));
31882- seq_printf(seq, "Product info : %s\n",
31883- chtostr((u8 *) (work32 + 6), 16));
31884- seq_printf(seq, "Description : %s\n",
31885- chtostr((u8 *) (work32 + 10), 16));
31886- seq_printf(seq, "Product rev. : %s\n",
31887- chtostr((u8 *) (work32 + 14), 8));
31888+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31889+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31890+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31891+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31892
31893 seq_printf(seq, "Serial number : ");
31894 print_serial_number(seq, (u8 *) (work32 + 16),
31895@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31896 }
31897
31898 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31899- seq_printf(seq, "Module name : %s\n",
31900- chtostr(result.module_name, 24));
31901- seq_printf(seq, "Module revision : %s\n",
31902- chtostr(result.module_rev, 8));
31903+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31904+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31905
31906 seq_printf(seq, "Serial number : ");
31907 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31908@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31909 return 0;
31910 }
31911
31912- seq_printf(seq, "Device name : %s\n",
31913- chtostr(result.device_name, 64));
31914- seq_printf(seq, "Service name : %s\n",
31915- chtostr(result.service_name, 64));
31916- seq_printf(seq, "Physical name : %s\n",
31917- chtostr(result.physical_location, 64));
31918- seq_printf(seq, "Instance number : %s\n",
31919- chtostr(result.instance_number, 4));
31920+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31921+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31922+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31923+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31924
31925 return 0;
31926 }
31927diff -urNp linux-2.6.32.42/drivers/message/i2o/iop.c linux-2.6.32.42/drivers/message/i2o/iop.c
31928--- linux-2.6.32.42/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31929+++ linux-2.6.32.42/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31930@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31931
31932 spin_lock_irqsave(&c->context_list_lock, flags);
31933
31934- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31935- atomic_inc(&c->context_list_counter);
31936+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31937+ atomic_inc_unchecked(&c->context_list_counter);
31938
31939- entry->context = atomic_read(&c->context_list_counter);
31940+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31941
31942 list_add(&entry->list, &c->context_list);
31943
31944@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31945
31946 #if BITS_PER_LONG == 64
31947 spin_lock_init(&c->context_list_lock);
31948- atomic_set(&c->context_list_counter, 0);
31949+ atomic_set_unchecked(&c->context_list_counter, 0);
31950 INIT_LIST_HEAD(&c->context_list);
31951 #endif
31952
31953diff -urNp linux-2.6.32.42/drivers/mfd/wm8350-i2c.c linux-2.6.32.42/drivers/mfd/wm8350-i2c.c
31954--- linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31955+++ linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31956@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31957 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31958 int ret;
31959
31960+ pax_track_stack();
31961+
31962 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31963 return -EINVAL;
31964
31965diff -urNp linux-2.6.32.42/drivers/misc/kgdbts.c linux-2.6.32.42/drivers/misc/kgdbts.c
31966--- linux-2.6.32.42/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31967+++ linux-2.6.32.42/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31968@@ -118,7 +118,7 @@
31969 } while (0)
31970 #define MAX_CONFIG_LEN 40
31971
31972-static struct kgdb_io kgdbts_io_ops;
31973+static const struct kgdb_io kgdbts_io_ops;
31974 static char get_buf[BUFMAX];
31975 static int get_buf_cnt;
31976 static char put_buf[BUFMAX];
31977@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31978 module_put(THIS_MODULE);
31979 }
31980
31981-static struct kgdb_io kgdbts_io_ops = {
31982+static const struct kgdb_io kgdbts_io_ops = {
31983 .name = "kgdbts",
31984 .read_char = kgdbts_get_char,
31985 .write_char = kgdbts_put_char,
31986diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c
31987--- linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31988+++ linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31989@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31990
31991 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31992 {
31993- atomic_long_inc(&mcs_op_statistics[op].count);
31994- atomic_long_add(clks, &mcs_op_statistics[op].total);
31995+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31996+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31997 if (mcs_op_statistics[op].max < clks)
31998 mcs_op_statistics[op].max = clks;
31999 }
32000diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c
32001--- linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32002+++ linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32003@@ -32,9 +32,9 @@
32004
32005 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32006
32007-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32008+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32009 {
32010- unsigned long val = atomic_long_read(v);
32011+ unsigned long val = atomic_long_read_unchecked(v);
32012
32013 if (val)
32014 seq_printf(s, "%16lu %s\n", val, id);
32015@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32016 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32017
32018 for (op = 0; op < mcsop_last; op++) {
32019- count = atomic_long_read(&mcs_op_statistics[op].count);
32020- total = atomic_long_read(&mcs_op_statistics[op].total);
32021+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32022+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32023 max = mcs_op_statistics[op].max;
32024 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32025 count ? total / count : 0, max);
32026diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h
32027--- linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32028+++ linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32029@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32030 * GRU statistics.
32031 */
32032 struct gru_stats_s {
32033- atomic_long_t vdata_alloc;
32034- atomic_long_t vdata_free;
32035- atomic_long_t gts_alloc;
32036- atomic_long_t gts_free;
32037- atomic_long_t vdata_double_alloc;
32038- atomic_long_t gts_double_allocate;
32039- atomic_long_t assign_context;
32040- atomic_long_t assign_context_failed;
32041- atomic_long_t free_context;
32042- atomic_long_t load_user_context;
32043- atomic_long_t load_kernel_context;
32044- atomic_long_t lock_kernel_context;
32045- atomic_long_t unlock_kernel_context;
32046- atomic_long_t steal_user_context;
32047- atomic_long_t steal_kernel_context;
32048- atomic_long_t steal_context_failed;
32049- atomic_long_t nopfn;
32050- atomic_long_t break_cow;
32051- atomic_long_t asid_new;
32052- atomic_long_t asid_next;
32053- atomic_long_t asid_wrap;
32054- atomic_long_t asid_reuse;
32055- atomic_long_t intr;
32056- atomic_long_t intr_mm_lock_failed;
32057- atomic_long_t call_os;
32058- atomic_long_t call_os_offnode_reference;
32059- atomic_long_t call_os_check_for_bug;
32060- atomic_long_t call_os_wait_queue;
32061- atomic_long_t user_flush_tlb;
32062- atomic_long_t user_unload_context;
32063- atomic_long_t user_exception;
32064- atomic_long_t set_context_option;
32065- atomic_long_t migrate_check;
32066- atomic_long_t migrated_retarget;
32067- atomic_long_t migrated_unload;
32068- atomic_long_t migrated_unload_delay;
32069- atomic_long_t migrated_nopfn_retarget;
32070- atomic_long_t migrated_nopfn_unload;
32071- atomic_long_t tlb_dropin;
32072- atomic_long_t tlb_dropin_fail_no_asid;
32073- atomic_long_t tlb_dropin_fail_upm;
32074- atomic_long_t tlb_dropin_fail_invalid;
32075- atomic_long_t tlb_dropin_fail_range_active;
32076- atomic_long_t tlb_dropin_fail_idle;
32077- atomic_long_t tlb_dropin_fail_fmm;
32078- atomic_long_t tlb_dropin_fail_no_exception;
32079- atomic_long_t tlb_dropin_fail_no_exception_war;
32080- atomic_long_t tfh_stale_on_fault;
32081- atomic_long_t mmu_invalidate_range;
32082- atomic_long_t mmu_invalidate_page;
32083- atomic_long_t mmu_clear_flush_young;
32084- atomic_long_t flush_tlb;
32085- atomic_long_t flush_tlb_gru;
32086- atomic_long_t flush_tlb_gru_tgh;
32087- atomic_long_t flush_tlb_gru_zero_asid;
32088-
32089- atomic_long_t copy_gpa;
32090-
32091- atomic_long_t mesq_receive;
32092- atomic_long_t mesq_receive_none;
32093- atomic_long_t mesq_send;
32094- atomic_long_t mesq_send_failed;
32095- atomic_long_t mesq_noop;
32096- atomic_long_t mesq_send_unexpected_error;
32097- atomic_long_t mesq_send_lb_overflow;
32098- atomic_long_t mesq_send_qlimit_reached;
32099- atomic_long_t mesq_send_amo_nacked;
32100- atomic_long_t mesq_send_put_nacked;
32101- atomic_long_t mesq_qf_not_full;
32102- atomic_long_t mesq_qf_locked;
32103- atomic_long_t mesq_qf_noop_not_full;
32104- atomic_long_t mesq_qf_switch_head_failed;
32105- atomic_long_t mesq_qf_unexpected_error;
32106- atomic_long_t mesq_noop_unexpected_error;
32107- atomic_long_t mesq_noop_lb_overflow;
32108- atomic_long_t mesq_noop_qlimit_reached;
32109- atomic_long_t mesq_noop_amo_nacked;
32110- atomic_long_t mesq_noop_put_nacked;
32111+ atomic_long_unchecked_t vdata_alloc;
32112+ atomic_long_unchecked_t vdata_free;
32113+ atomic_long_unchecked_t gts_alloc;
32114+ atomic_long_unchecked_t gts_free;
32115+ atomic_long_unchecked_t vdata_double_alloc;
32116+ atomic_long_unchecked_t gts_double_allocate;
32117+ atomic_long_unchecked_t assign_context;
32118+ atomic_long_unchecked_t assign_context_failed;
32119+ atomic_long_unchecked_t free_context;
32120+ atomic_long_unchecked_t load_user_context;
32121+ atomic_long_unchecked_t load_kernel_context;
32122+ atomic_long_unchecked_t lock_kernel_context;
32123+ atomic_long_unchecked_t unlock_kernel_context;
32124+ atomic_long_unchecked_t steal_user_context;
32125+ atomic_long_unchecked_t steal_kernel_context;
32126+ atomic_long_unchecked_t steal_context_failed;
32127+ atomic_long_unchecked_t nopfn;
32128+ atomic_long_unchecked_t break_cow;
32129+ atomic_long_unchecked_t asid_new;
32130+ atomic_long_unchecked_t asid_next;
32131+ atomic_long_unchecked_t asid_wrap;
32132+ atomic_long_unchecked_t asid_reuse;
32133+ atomic_long_unchecked_t intr;
32134+ atomic_long_unchecked_t intr_mm_lock_failed;
32135+ atomic_long_unchecked_t call_os;
32136+ atomic_long_unchecked_t call_os_offnode_reference;
32137+ atomic_long_unchecked_t call_os_check_for_bug;
32138+ atomic_long_unchecked_t call_os_wait_queue;
32139+ atomic_long_unchecked_t user_flush_tlb;
32140+ atomic_long_unchecked_t user_unload_context;
32141+ atomic_long_unchecked_t user_exception;
32142+ atomic_long_unchecked_t set_context_option;
32143+ atomic_long_unchecked_t migrate_check;
32144+ atomic_long_unchecked_t migrated_retarget;
32145+ atomic_long_unchecked_t migrated_unload;
32146+ atomic_long_unchecked_t migrated_unload_delay;
32147+ atomic_long_unchecked_t migrated_nopfn_retarget;
32148+ atomic_long_unchecked_t migrated_nopfn_unload;
32149+ atomic_long_unchecked_t tlb_dropin;
32150+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32151+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32152+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32153+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32154+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32155+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32156+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32157+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
32158+ atomic_long_unchecked_t tfh_stale_on_fault;
32159+ atomic_long_unchecked_t mmu_invalidate_range;
32160+ atomic_long_unchecked_t mmu_invalidate_page;
32161+ atomic_long_unchecked_t mmu_clear_flush_young;
32162+ atomic_long_unchecked_t flush_tlb;
32163+ atomic_long_unchecked_t flush_tlb_gru;
32164+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32165+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32166+
32167+ atomic_long_unchecked_t copy_gpa;
32168+
32169+ atomic_long_unchecked_t mesq_receive;
32170+ atomic_long_unchecked_t mesq_receive_none;
32171+ atomic_long_unchecked_t mesq_send;
32172+ atomic_long_unchecked_t mesq_send_failed;
32173+ atomic_long_unchecked_t mesq_noop;
32174+ atomic_long_unchecked_t mesq_send_unexpected_error;
32175+ atomic_long_unchecked_t mesq_send_lb_overflow;
32176+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32177+ atomic_long_unchecked_t mesq_send_amo_nacked;
32178+ atomic_long_unchecked_t mesq_send_put_nacked;
32179+ atomic_long_unchecked_t mesq_qf_not_full;
32180+ atomic_long_unchecked_t mesq_qf_locked;
32181+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32182+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32183+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32184+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32185+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32186+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32187+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32188+ atomic_long_unchecked_t mesq_noop_put_nacked;
32189
32190 };
32191
32192@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
32193 cchop_deallocate, tghop_invalidate, mcsop_last};
32194
32195 struct mcs_op_statistic {
32196- atomic_long_t count;
32197- atomic_long_t total;
32198+ atomic_long_unchecked_t count;
32199+ atomic_long_unchecked_t total;
32200 unsigned long max;
32201 };
32202
32203@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
32204
32205 #define STAT(id) do { \
32206 if (gru_options & OPT_STATS) \
32207- atomic_long_inc(&gru_stats.id); \
32208+ atomic_long_inc_unchecked(&gru_stats.id); \
32209 } while (0)
32210
32211 #ifdef CONFIG_SGI_GRU_DEBUG
32212diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c
32213--- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
32214+++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
32215@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
32216 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32217 unsigned long timeo = jiffies + HZ;
32218
32219+ pax_track_stack();
32220+
32221 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32222 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32223 goto sleep;
32224@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
32225 unsigned long initial_adr;
32226 int initial_len = len;
32227
32228+ pax_track_stack();
32229+
32230 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32231 adr += chip->start;
32232 initial_adr = adr;
32233@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
32234 int retries = 3;
32235 int ret;
32236
32237+ pax_track_stack();
32238+
32239 adr += chip->start;
32240
32241 retry:
32242diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c
32243--- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
32244+++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
32245@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
32246 unsigned long cmd_addr;
32247 struct cfi_private *cfi = map->fldrv_priv;
32248
32249+ pax_track_stack();
32250+
32251 adr += chip->start;
32252
32253 /* Ensure cmd read/writes are aligned. */
32254@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
32255 DECLARE_WAITQUEUE(wait, current);
32256 int wbufsize, z;
32257
32258+ pax_track_stack();
32259+
32260 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32261 if (adr & (map_bankwidth(map)-1))
32262 return -EINVAL;
32263@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
32264 DECLARE_WAITQUEUE(wait, current);
32265 int ret = 0;
32266
32267+ pax_track_stack();
32268+
32269 adr += chip->start;
32270
32271 /* Let's determine this according to the interleave only once */
32272@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
32273 unsigned long timeo = jiffies + HZ;
32274 DECLARE_WAITQUEUE(wait, current);
32275
32276+ pax_track_stack();
32277+
32278 adr += chip->start;
32279
32280 /* Let's determine this according to the interleave only once */
32281@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
32282 unsigned long timeo = jiffies + HZ;
32283 DECLARE_WAITQUEUE(wait, current);
32284
32285+ pax_track_stack();
32286+
32287 adr += chip->start;
32288
32289 /* Let's determine this according to the interleave only once */
32290diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2000.c linux-2.6.32.42/drivers/mtd/devices/doc2000.c
32291--- linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
32292+++ linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
32293@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
32294
32295 /* The ECC will not be calculated correctly if less than 512 is written */
32296 /* DBB-
32297- if (len != 0x200 && eccbuf)
32298+ if (len != 0x200)
32299 printk(KERN_WARNING
32300 "ECC needs a full sector write (adr: %lx size %lx)\n",
32301 (long) to, (long) len);
32302diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2001.c linux-2.6.32.42/drivers/mtd/devices/doc2001.c
32303--- linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
32304+++ linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
32305@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
32306 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32307
32308 /* Don't allow read past end of device */
32309- if (from >= this->totlen)
32310+ if (from >= this->totlen || !len)
32311 return -EINVAL;
32312
32313 /* Don't allow a single read to cross a 512-byte block boundary */
32314diff -urNp linux-2.6.32.42/drivers/mtd/ftl.c linux-2.6.32.42/drivers/mtd/ftl.c
32315--- linux-2.6.32.42/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
32316+++ linux-2.6.32.42/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
32317@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
32318 loff_t offset;
32319 uint16_t srcunitswap = cpu_to_le16(srcunit);
32320
32321+ pax_track_stack();
32322+
32323 eun = &part->EUNInfo[srcunit];
32324 xfer = &part->XferInfo[xferunit];
32325 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32326diff -urNp linux-2.6.32.42/drivers/mtd/inftlcore.c linux-2.6.32.42/drivers/mtd/inftlcore.c
32327--- linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
32328+++ linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
32329@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
32330 struct inftl_oob oob;
32331 size_t retlen;
32332
32333+ pax_track_stack();
32334+
32335 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32336 "pending=%d)\n", inftl, thisVUC, pendingblock);
32337
32338diff -urNp linux-2.6.32.42/drivers/mtd/inftlmount.c linux-2.6.32.42/drivers/mtd/inftlmount.c
32339--- linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
32340+++ linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
32341@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
32342 struct INFTLPartition *ip;
32343 size_t retlen;
32344
32345+ pax_track_stack();
32346+
32347 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32348
32349 /*
32350diff -urNp linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c
32351--- linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
32352+++ linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
32353@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
32354 {
32355 map_word pfow_val[4];
32356
32357+ pax_track_stack();
32358+
32359 /* Check identification string */
32360 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32361 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32362diff -urNp linux-2.6.32.42/drivers/mtd/mtdchar.c linux-2.6.32.42/drivers/mtd/mtdchar.c
32363--- linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
32364+++ linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
32365@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
32366 u_long size;
32367 struct mtd_info_user info;
32368
32369+ pax_track_stack();
32370+
32371 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32372
32373 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32374diff -urNp linux-2.6.32.42/drivers/mtd/nftlcore.c linux-2.6.32.42/drivers/mtd/nftlcore.c
32375--- linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
32376+++ linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
32377@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
32378 int inplace = 1;
32379 size_t retlen;
32380
32381+ pax_track_stack();
32382+
32383 memset(BlockMap, 0xff, sizeof(BlockMap));
32384 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32385
32386diff -urNp linux-2.6.32.42/drivers/mtd/nftlmount.c linux-2.6.32.42/drivers/mtd/nftlmount.c
32387--- linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
32388+++ linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
32389@@ -23,6 +23,7 @@
32390 #include <asm/errno.h>
32391 #include <linux/delay.h>
32392 #include <linux/slab.h>
32393+#include <linux/sched.h>
32394 #include <linux/mtd/mtd.h>
32395 #include <linux/mtd/nand.h>
32396 #include <linux/mtd/nftl.h>
32397@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
32398 struct mtd_info *mtd = nftl->mbd.mtd;
32399 unsigned int i;
32400
32401+ pax_track_stack();
32402+
32403 /* Assume logical EraseSize == physical erasesize for starting the scan.
32404 We'll sort it out later if we find a MediaHeader which says otherwise */
32405 /* Actually, we won't. The new DiskOnChip driver has already scanned
32406diff -urNp linux-2.6.32.42/drivers/mtd/ubi/build.c linux-2.6.32.42/drivers/mtd/ubi/build.c
32407--- linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
32408+++ linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
32409@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
32410 static int __init bytes_str_to_int(const char *str)
32411 {
32412 char *endp;
32413- unsigned long result;
32414+ unsigned long result, scale = 1;
32415
32416 result = simple_strtoul(str, &endp, 0);
32417 if (str == endp || result >= INT_MAX) {
32418@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
32419
32420 switch (*endp) {
32421 case 'G':
32422- result *= 1024;
32423+ scale *= 1024;
32424 case 'M':
32425- result *= 1024;
32426+ scale *= 1024;
32427 case 'K':
32428- result *= 1024;
32429+ scale *= 1024;
32430 if (endp[1] == 'i' && endp[2] == 'B')
32431 endp += 2;
32432 case '\0':
32433@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
32434 return -EINVAL;
32435 }
32436
32437- return result;
32438+ if ((intoverflow_t)result*scale >= INT_MAX) {
32439+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32440+ str);
32441+ return -EINVAL;
32442+ }
32443+
32444+ return result*scale;
32445 }
32446
32447 /**
32448diff -urNp linux-2.6.32.42/drivers/net/bnx2.c linux-2.6.32.42/drivers/net/bnx2.c
32449--- linux-2.6.32.42/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
32450+++ linux-2.6.32.42/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
32451@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32452 int rc = 0;
32453 u32 magic, csum;
32454
32455+ pax_track_stack();
32456+
32457 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32458 goto test_nvram_done;
32459
32460diff -urNp linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c
32461--- linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
32462+++ linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
32463@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
32464 int i, addr, ret;
32465 struct t3_vpd vpd;
32466
32467+ pax_track_stack();
32468+
32469 /*
32470 * Card information is normally at VPD_BASE but some early cards had
32471 * it at 0.
32472diff -urNp linux-2.6.32.42/drivers/net/e1000e/82571.c linux-2.6.32.42/drivers/net/e1000e/82571.c
32473--- linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
32474+++ linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
32475@@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
32476 {
32477 struct e1000_hw *hw = &adapter->hw;
32478 struct e1000_mac_info *mac = &hw->mac;
32479+ /* cannot be const */
32480 struct e1000_mac_operations *func = &mac->ops;
32481 u32 swsm = 0;
32482 u32 swsm2 = 0;
32483@@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
32484 temp = er32(ICRXDMTC);
32485 }
32486
32487-static struct e1000_mac_operations e82571_mac_ops = {
32488+static const struct e1000_mac_operations e82571_mac_ops = {
32489 /* .check_mng_mode: mac type dependent */
32490 /* .check_for_link: media type dependent */
32491 .id_led_init = e1000e_id_led_init,
32492@@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
32493 .setup_led = e1000e_setup_led_generic,
32494 };
32495
32496-static struct e1000_phy_operations e82_phy_ops_igp = {
32497+static const struct e1000_phy_operations e82_phy_ops_igp = {
32498 .acquire_phy = e1000_get_hw_semaphore_82571,
32499 .check_reset_block = e1000e_check_reset_block_generic,
32500 .commit_phy = NULL,
32501@@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
32502 .cfg_on_link_up = NULL,
32503 };
32504
32505-static struct e1000_phy_operations e82_phy_ops_m88 = {
32506+static const struct e1000_phy_operations e82_phy_ops_m88 = {
32507 .acquire_phy = e1000_get_hw_semaphore_82571,
32508 .check_reset_block = e1000e_check_reset_block_generic,
32509 .commit_phy = e1000e_phy_sw_reset,
32510@@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
32511 .cfg_on_link_up = NULL,
32512 };
32513
32514-static struct e1000_phy_operations e82_phy_ops_bm = {
32515+static const struct e1000_phy_operations e82_phy_ops_bm = {
32516 .acquire_phy = e1000_get_hw_semaphore_82571,
32517 .check_reset_block = e1000e_check_reset_block_generic,
32518 .commit_phy = e1000e_phy_sw_reset,
32519@@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
32520 .cfg_on_link_up = NULL,
32521 };
32522
32523-static struct e1000_nvm_operations e82571_nvm_ops = {
32524+static const struct e1000_nvm_operations e82571_nvm_ops = {
32525 .acquire_nvm = e1000_acquire_nvm_82571,
32526 .read_nvm = e1000e_read_nvm_eerd,
32527 .release_nvm = e1000_release_nvm_82571,
32528diff -urNp linux-2.6.32.42/drivers/net/e1000e/e1000.h linux-2.6.32.42/drivers/net/e1000e/e1000.h
32529--- linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
32530+++ linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
32531@@ -375,9 +375,9 @@ struct e1000_info {
32532 u32 pba;
32533 u32 max_hw_frame_size;
32534 s32 (*get_variants)(struct e1000_adapter *);
32535- struct e1000_mac_operations *mac_ops;
32536- struct e1000_phy_operations *phy_ops;
32537- struct e1000_nvm_operations *nvm_ops;
32538+ const struct e1000_mac_operations *mac_ops;
32539+ const struct e1000_phy_operations *phy_ops;
32540+ const struct e1000_nvm_operations *nvm_ops;
32541 };
32542
32543 /* hardware capability, feature, and workaround flags */
32544diff -urNp linux-2.6.32.42/drivers/net/e1000e/es2lan.c linux-2.6.32.42/drivers/net/e1000e/es2lan.c
32545--- linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
32546+++ linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
32547@@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
32548 {
32549 struct e1000_hw *hw = &adapter->hw;
32550 struct e1000_mac_info *mac = &hw->mac;
32551+ /* cannot be const */
32552 struct e1000_mac_operations *func = &mac->ops;
32553
32554 /* Set media type */
32555@@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
32556 temp = er32(ICRXDMTC);
32557 }
32558
32559-static struct e1000_mac_operations es2_mac_ops = {
32560+static const struct e1000_mac_operations es2_mac_ops = {
32561 .id_led_init = e1000e_id_led_init,
32562 .check_mng_mode = e1000e_check_mng_mode_generic,
32563 /* check_for_link dependent on media type */
32564@@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
32565 .setup_led = e1000e_setup_led_generic,
32566 };
32567
32568-static struct e1000_phy_operations es2_phy_ops = {
32569+static const struct e1000_phy_operations es2_phy_ops = {
32570 .acquire_phy = e1000_acquire_phy_80003es2lan,
32571 .check_reset_block = e1000e_check_reset_block_generic,
32572 .commit_phy = e1000e_phy_sw_reset,
32573@@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
32574 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
32575 };
32576
32577-static struct e1000_nvm_operations es2_nvm_ops = {
32578+static const struct e1000_nvm_operations es2_nvm_ops = {
32579 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
32580 .read_nvm = e1000e_read_nvm_eerd,
32581 .release_nvm = e1000_release_nvm_80003es2lan,
32582diff -urNp linux-2.6.32.42/drivers/net/e1000e/hw.h linux-2.6.32.42/drivers/net/e1000e/hw.h
32583--- linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
32584+++ linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
32585@@ -756,34 +756,34 @@ struct e1000_mac_operations {
32586
32587 /* Function pointers for the PHY. */
32588 struct e1000_phy_operations {
32589- s32 (*acquire_phy)(struct e1000_hw *);
32590- s32 (*check_polarity)(struct e1000_hw *);
32591- s32 (*check_reset_block)(struct e1000_hw *);
32592- s32 (*commit_phy)(struct e1000_hw *);
32593- s32 (*force_speed_duplex)(struct e1000_hw *);
32594- s32 (*get_cfg_done)(struct e1000_hw *hw);
32595- s32 (*get_cable_length)(struct e1000_hw *);
32596- s32 (*get_phy_info)(struct e1000_hw *);
32597- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
32598- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32599- void (*release_phy)(struct e1000_hw *);
32600- s32 (*reset_phy)(struct e1000_hw *);
32601- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
32602- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32603- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
32604- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32605- s32 (*cfg_on_link_up)(struct e1000_hw *);
32606+ s32 (* acquire_phy)(struct e1000_hw *);
32607+ s32 (* check_polarity)(struct e1000_hw *);
32608+ s32 (* check_reset_block)(struct e1000_hw *);
32609+ s32 (* commit_phy)(struct e1000_hw *);
32610+ s32 (* force_speed_duplex)(struct e1000_hw *);
32611+ s32 (* get_cfg_done)(struct e1000_hw *hw);
32612+ s32 (* get_cable_length)(struct e1000_hw *);
32613+ s32 (* get_phy_info)(struct e1000_hw *);
32614+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
32615+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32616+ void (* release_phy)(struct e1000_hw *);
32617+ s32 (* reset_phy)(struct e1000_hw *);
32618+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32619+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32620+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32621+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32622+ s32 (* cfg_on_link_up)(struct e1000_hw *);
32623 };
32624
32625 /* Function pointers for the NVM. */
32626 struct e1000_nvm_operations {
32627- s32 (*acquire_nvm)(struct e1000_hw *);
32628- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32629- void (*release_nvm)(struct e1000_hw *);
32630- s32 (*update_nvm)(struct e1000_hw *);
32631- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32632- s32 (*validate_nvm)(struct e1000_hw *);
32633- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32634+ s32 (* const acquire_nvm)(struct e1000_hw *);
32635+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32636+ void (* const release_nvm)(struct e1000_hw *);
32637+ s32 (* const update_nvm)(struct e1000_hw *);
32638+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32639+ s32 (* const validate_nvm)(struct e1000_hw *);
32640+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32641 };
32642
32643 struct e1000_mac_info {
32644diff -urNp linux-2.6.32.42/drivers/net/e1000e/ich8lan.c linux-2.6.32.42/drivers/net/e1000e/ich8lan.c
32645--- linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32646+++ linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32647@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32648 }
32649 }
32650
32651-static struct e1000_mac_operations ich8_mac_ops = {
32652+static const struct e1000_mac_operations ich8_mac_ops = {
32653 .id_led_init = e1000e_id_led_init,
32654 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32655 .check_for_link = e1000_check_for_copper_link_ich8lan,
32656@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32657 /* id_led_init dependent on mac type */
32658 };
32659
32660-static struct e1000_phy_operations ich8_phy_ops = {
32661+static const struct e1000_phy_operations ich8_phy_ops = {
32662 .acquire_phy = e1000_acquire_swflag_ich8lan,
32663 .check_reset_block = e1000_check_reset_block_ich8lan,
32664 .commit_phy = NULL,
32665@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32666 .write_phy_reg = e1000e_write_phy_reg_igp,
32667 };
32668
32669-static struct e1000_nvm_operations ich8_nvm_ops = {
32670+static const struct e1000_nvm_operations ich8_nvm_ops = {
32671 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32672 .read_nvm = e1000_read_nvm_ich8lan,
32673 .release_nvm = e1000_release_nvm_ich8lan,
32674diff -urNp linux-2.6.32.42/drivers/net/hamradio/6pack.c linux-2.6.32.42/drivers/net/hamradio/6pack.c
32675--- linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
32676+++ linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
32677@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32678 unsigned char buf[512];
32679 int count1;
32680
32681+ pax_track_stack();
32682+
32683 if (!count)
32684 return;
32685
32686diff -urNp linux-2.6.32.42/drivers/net/ibmveth.c linux-2.6.32.42/drivers/net/ibmveth.c
32687--- linux-2.6.32.42/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32688+++ linux-2.6.32.42/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32689@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32690 NULL,
32691 };
32692
32693-static struct sysfs_ops veth_pool_ops = {
32694+static const struct sysfs_ops veth_pool_ops = {
32695 .show = veth_pool_show,
32696 .store = veth_pool_store,
32697 };
32698diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_82575.c linux-2.6.32.42/drivers/net/igb/e1000_82575.c
32699--- linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32700+++ linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32701@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32702 wr32(E1000_VT_CTL, vt_ctl);
32703 }
32704
32705-static struct e1000_mac_operations e1000_mac_ops_82575 = {
32706+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32707 .reset_hw = igb_reset_hw_82575,
32708 .init_hw = igb_init_hw_82575,
32709 .check_for_link = igb_check_for_link_82575,
32710@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32711 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32712 };
32713
32714-static struct e1000_phy_operations e1000_phy_ops_82575 = {
32715+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32716 .acquire = igb_acquire_phy_82575,
32717 .get_cfg_done = igb_get_cfg_done_82575,
32718 .release = igb_release_phy_82575,
32719 };
32720
32721-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32722+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32723 .acquire = igb_acquire_nvm_82575,
32724 .read = igb_read_nvm_eerd,
32725 .release = igb_release_nvm_82575,
32726diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_hw.h linux-2.6.32.42/drivers/net/igb/e1000_hw.h
32727--- linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32728+++ linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32729@@ -305,17 +305,17 @@ struct e1000_phy_operations {
32730 };
32731
32732 struct e1000_nvm_operations {
32733- s32 (*acquire)(struct e1000_hw *);
32734- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32735- void (*release)(struct e1000_hw *);
32736- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32737+ s32 (* const acquire)(struct e1000_hw *);
32738+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32739+ void (* const release)(struct e1000_hw *);
32740+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32741 };
32742
32743 struct e1000_info {
32744 s32 (*get_invariants)(struct e1000_hw *);
32745- struct e1000_mac_operations *mac_ops;
32746- struct e1000_phy_operations *phy_ops;
32747- struct e1000_nvm_operations *nvm_ops;
32748+ const struct e1000_mac_operations *mac_ops;
32749+ const struct e1000_phy_operations *phy_ops;
32750+ const struct e1000_nvm_operations *nvm_ops;
32751 };
32752
32753 extern const struct e1000_info e1000_82575_info;
32754diff -urNp linux-2.6.32.42/drivers/net/iseries_veth.c linux-2.6.32.42/drivers/net/iseries_veth.c
32755--- linux-2.6.32.42/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32756+++ linux-2.6.32.42/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32757@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32758 NULL
32759 };
32760
32761-static struct sysfs_ops veth_cnx_sysfs_ops = {
32762+static const struct sysfs_ops veth_cnx_sysfs_ops = {
32763 .show = veth_cnx_attribute_show
32764 };
32765
32766@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32767 NULL
32768 };
32769
32770-static struct sysfs_ops veth_port_sysfs_ops = {
32771+static const struct sysfs_ops veth_port_sysfs_ops = {
32772 .show = veth_port_attribute_show
32773 };
32774
32775diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c
32776--- linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32777+++ linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32778@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32779 u32 rctl;
32780 int i;
32781
32782+ pax_track_stack();
32783+
32784 /* Check for Promiscuous and All Multicast modes */
32785
32786 rctl = IXGB_READ_REG(hw, RCTL);
32787diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c
32788--- linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32789+++ linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32790@@ -260,6 +260,9 @@ void __devinit
32791 ixgb_check_options(struct ixgb_adapter *adapter)
32792 {
32793 int bd = adapter->bd_number;
32794+
32795+ pax_track_stack();
32796+
32797 if (bd >= IXGB_MAX_NIC) {
32798 printk(KERN_NOTICE
32799 "Warning: no configuration for board #%i\n", bd);
32800diff -urNp linux-2.6.32.42/drivers/net/mlx4/main.c linux-2.6.32.42/drivers/net/mlx4/main.c
32801--- linux-2.6.32.42/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32802+++ linux-2.6.32.42/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32803@@ -38,6 +38,7 @@
32804 #include <linux/errno.h>
32805 #include <linux/pci.h>
32806 #include <linux/dma-mapping.h>
32807+#include <linux/sched.h>
32808
32809 #include <linux/mlx4/device.h>
32810 #include <linux/mlx4/doorbell.h>
32811@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32812 u64 icm_size;
32813 int err;
32814
32815+ pax_track_stack();
32816+
32817 err = mlx4_QUERY_FW(dev);
32818 if (err) {
32819 if (err == -EACCES)
32820diff -urNp linux-2.6.32.42/drivers/net/niu.c linux-2.6.32.42/drivers/net/niu.c
32821--- linux-2.6.32.42/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32822+++ linux-2.6.32.42/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32823@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32824 int i, num_irqs, err;
32825 u8 first_ldg;
32826
32827+ pax_track_stack();
32828+
32829 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32830 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32831 ldg_num_map[i] = first_ldg + i;
32832diff -urNp linux-2.6.32.42/drivers/net/pcnet32.c linux-2.6.32.42/drivers/net/pcnet32.c
32833--- linux-2.6.32.42/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32834+++ linux-2.6.32.42/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32835@@ -79,7 +79,7 @@ static int cards_found;
32836 /*
32837 * VLB I/O addresses
32838 */
32839-static unsigned int pcnet32_portlist[] __initdata =
32840+static unsigned int pcnet32_portlist[] __devinitdata =
32841 { 0x300, 0x320, 0x340, 0x360, 0 };
32842
32843 static int pcnet32_debug = 0;
32844diff -urNp linux-2.6.32.42/drivers/net/tg3.h linux-2.6.32.42/drivers/net/tg3.h
32845--- linux-2.6.32.42/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32846+++ linux-2.6.32.42/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32847@@ -95,6 +95,7 @@
32848 #define CHIPREV_ID_5750_A0 0x4000
32849 #define CHIPREV_ID_5750_A1 0x4001
32850 #define CHIPREV_ID_5750_A3 0x4003
32851+#define CHIPREV_ID_5750_C1 0x4201
32852 #define CHIPREV_ID_5750_C2 0x4202
32853 #define CHIPREV_ID_5752_A0_HW 0x5000
32854 #define CHIPREV_ID_5752_A0 0x6000
32855diff -urNp linux-2.6.32.42/drivers/net/tulip/de2104x.c linux-2.6.32.42/drivers/net/tulip/de2104x.c
32856--- linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32857+++ linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32858@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32859 struct de_srom_info_leaf *il;
32860 void *bufp;
32861
32862+ pax_track_stack();
32863+
32864 /* download entire eeprom */
32865 for (i = 0; i < DE_EEPROM_WORDS; i++)
32866 ((__le16 *)ee_data)[i] =
32867diff -urNp linux-2.6.32.42/drivers/net/tulip/de4x5.c linux-2.6.32.42/drivers/net/tulip/de4x5.c
32868--- linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32869+++ linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32870@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32871 for (i=0; i<ETH_ALEN; i++) {
32872 tmp.addr[i] = dev->dev_addr[i];
32873 }
32874- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32875+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32876 break;
32877
32878 case DE4X5_SET_HWADDR: /* Set the hardware address */
32879@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32880 spin_lock_irqsave(&lp->lock, flags);
32881 memcpy(&statbuf, &lp->pktStats, ioc->len);
32882 spin_unlock_irqrestore(&lp->lock, flags);
32883- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32884+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32885 return -EFAULT;
32886 break;
32887 }
32888diff -urNp linux-2.6.32.42/drivers/net/usb/hso.c linux-2.6.32.42/drivers/net/usb/hso.c
32889--- linux-2.6.32.42/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32890+++ linux-2.6.32.42/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32891@@ -71,7 +71,7 @@
32892 #include <asm/byteorder.h>
32893 #include <linux/serial_core.h>
32894 #include <linux/serial.h>
32895-
32896+#include <asm/local.h>
32897
32898 #define DRIVER_VERSION "1.2"
32899 #define MOD_AUTHOR "Option Wireless"
32900@@ -258,7 +258,7 @@ struct hso_serial {
32901
32902 /* from usb_serial_port */
32903 struct tty_struct *tty;
32904- int open_count;
32905+ local_t open_count;
32906 spinlock_t serial_lock;
32907
32908 int (*write_data) (struct hso_serial *serial);
32909@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32910 struct urb *urb;
32911
32912 urb = serial->rx_urb[0];
32913- if (serial->open_count > 0) {
32914+ if (local_read(&serial->open_count) > 0) {
32915 count = put_rxbuf_data(urb, serial);
32916 if (count == -1)
32917 return;
32918@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32919 DUMP1(urb->transfer_buffer, urb->actual_length);
32920
32921 /* Anyone listening? */
32922- if (serial->open_count == 0)
32923+ if (local_read(&serial->open_count) == 0)
32924 return;
32925
32926 if (status == 0) {
32927@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32928 spin_unlock_irq(&serial->serial_lock);
32929
32930 /* check for port already opened, if not set the termios */
32931- serial->open_count++;
32932- if (serial->open_count == 1) {
32933+ if (local_inc_return(&serial->open_count) == 1) {
32934 tty->low_latency = 1;
32935 serial->rx_state = RX_IDLE;
32936 /* Force default termio settings */
32937@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32938 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32939 if (result) {
32940 hso_stop_serial_device(serial->parent);
32941- serial->open_count--;
32942+ local_dec(&serial->open_count);
32943 kref_put(&serial->parent->ref, hso_serial_ref_free);
32944 }
32945 } else {
32946@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32947
32948 /* reset the rts and dtr */
32949 /* do the actual close */
32950- serial->open_count--;
32951+ local_dec(&serial->open_count);
32952
32953- if (serial->open_count <= 0) {
32954- serial->open_count = 0;
32955+ if (local_read(&serial->open_count) <= 0) {
32956+ local_set(&serial->open_count, 0);
32957 spin_lock_irq(&serial->serial_lock);
32958 if (serial->tty == tty) {
32959 serial->tty->driver_data = NULL;
32960@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32961
32962 /* the actual setup */
32963 spin_lock_irqsave(&serial->serial_lock, flags);
32964- if (serial->open_count)
32965+ if (local_read(&serial->open_count))
32966 _hso_serial_set_termios(tty, old);
32967 else
32968 tty->termios = old;
32969@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32970 /* Start all serial ports */
32971 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32972 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32973- if (dev2ser(serial_table[i])->open_count) {
32974+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32975 result =
32976 hso_start_serial_device(serial_table[i], GFP_NOIO);
32977 hso_kick_transmit(dev2ser(serial_table[i]));
32978diff -urNp linux-2.6.32.42/drivers/net/vxge/vxge-main.c linux-2.6.32.42/drivers/net/vxge/vxge-main.c
32979--- linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32980+++ linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32981@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32982 struct sk_buff *completed[NR_SKB_COMPLETED];
32983 int more;
32984
32985+ pax_track_stack();
32986+
32987 do {
32988 more = 0;
32989 skb_ptr = completed;
32990@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32991 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32992 int index;
32993
32994+ pax_track_stack();
32995+
32996 /*
32997 * Filling
32998 * - itable with bucket numbers
32999diff -urNp linux-2.6.32.42/drivers/net/wan/cycx_x25.c linux-2.6.32.42/drivers/net/wan/cycx_x25.c
33000--- linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
33001+++ linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
33002@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
33003 unsigned char hex[1024],
33004 * phex = hex;
33005
33006+ pax_track_stack();
33007+
33008 if (len >= (sizeof(hex) / 2))
33009 len = (sizeof(hex) / 2) - 1;
33010
33011diff -urNp linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c
33012--- linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
33013+++ linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
33014@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
33015 int do_autopm = 1;
33016 DECLARE_COMPLETION_ONSTACK(notif_completion);
33017
33018+ pax_track_stack();
33019+
33020 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
33021 i2400m, ack, ack_size);
33022 BUG_ON(_ack == i2400m->bm_ack_buf);
33023diff -urNp linux-2.6.32.42/drivers/net/wireless/airo.c linux-2.6.32.42/drivers/net/wireless/airo.c
33024--- linux-2.6.32.42/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
33025+++ linux-2.6.32.42/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
33026@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
33027 BSSListElement * loop_net;
33028 BSSListElement * tmp_net;
33029
33030+ pax_track_stack();
33031+
33032 /* Blow away current list of scan results */
33033 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
33034 list_move_tail (&loop_net->list, &ai->network_free_list);
33035@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
33036 WepKeyRid wkr;
33037 int rc;
33038
33039+ pax_track_stack();
33040+
33041 memset( &mySsid, 0, sizeof( mySsid ) );
33042 kfree (ai->flash);
33043 ai->flash = NULL;
33044@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
33045 __le32 *vals = stats.vals;
33046 int len;
33047
33048+ pax_track_stack();
33049+
33050 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33051 return -ENOMEM;
33052 data = (struct proc_data *)file->private_data;
33053@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
33054 /* If doLoseSync is not 1, we won't do a Lose Sync */
33055 int doLoseSync = -1;
33056
33057+ pax_track_stack();
33058+
33059 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33060 return -ENOMEM;
33061 data = (struct proc_data *)file->private_data;
33062@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
33063 int i;
33064 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
33065
33066+ pax_track_stack();
33067+
33068 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
33069 if (!qual)
33070 return -ENOMEM;
33071@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
33072 CapabilityRid cap_rid;
33073 __le32 *vals = stats_rid.vals;
33074
33075+ pax_track_stack();
33076+
33077 /* Get stats out of the card */
33078 clear_bit(JOB_WSTATS, &local->jobs);
33079 if (local->power.event) {
33080diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c
33081--- linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
33082+++ linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
33083@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
33084 unsigned int v;
33085 u64 tsf;
33086
33087+ pax_track_stack();
33088+
33089 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
33090 len += snprintf(buf+len, sizeof(buf)-len,
33091 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
33092@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
33093 unsigned int len = 0;
33094 unsigned int i;
33095
33096+ pax_track_stack();
33097+
33098 len += snprintf(buf+len, sizeof(buf)-len,
33099 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
33100
33101diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c
33102--- linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
33103+++ linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
33104@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
33105 char buf[512];
33106 unsigned int len = 0;
33107
33108+ pax_track_stack();
33109+
33110 len += snprintf(buf + len, sizeof(buf) - len,
33111 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
33112 len += snprintf(buf + len, sizeof(buf) - len,
33113@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
33114 int i;
33115 u8 addr[ETH_ALEN];
33116
33117+ pax_track_stack();
33118+
33119 len += snprintf(buf + len, sizeof(buf) - len,
33120 "primary: %s (%s chan=%d ht=%d)\n",
33121 wiphy_name(sc->pri_wiphy->hw->wiphy),
33122diff -urNp linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c
33123--- linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33124+++ linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33125@@ -43,7 +43,7 @@ static struct dentry *rootdir;
33126 struct b43_debugfs_fops {
33127 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
33128 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
33129- struct file_operations fops;
33130+ const struct file_operations fops;
33131 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
33132 size_t file_struct_offset;
33133 };
33134diff -urNp linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c
33135--- linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33136+++ linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33137@@ -44,7 +44,7 @@ static struct dentry *rootdir;
33138 struct b43legacy_debugfs_fops {
33139 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
33140 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
33141- struct file_operations fops;
33142+ const struct file_operations fops;
33143 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
33144 size_t file_struct_offset;
33145 /* Take wl->irq_lock before calling read/write? */
33146diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c
33147--- linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
33148+++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
33149@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
33150 int err;
33151 DECLARE_SSID_BUF(ssid);
33152
33153+ pax_track_stack();
33154+
33155 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
33156
33157 if (ssid_len)
33158@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
33159 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
33160 int err;
33161
33162+ pax_track_stack();
33163+
33164 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
33165 idx, keylen, len);
33166
33167diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c
33168--- linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
33169+++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
33170@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
33171 unsigned long flags;
33172 DECLARE_SSID_BUF(ssid);
33173
33174+ pax_track_stack();
33175+
33176 LIBIPW_DEBUG_SCAN("'%s' (%pM"
33177 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
33178 print_ssid(ssid, info_element->data, info_element->len),
33179diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c
33180--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
33181+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
33182@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
33183 },
33184 };
33185
33186-static struct iwl_ops iwl1000_ops = {
33187+static const struct iwl_ops iwl1000_ops = {
33188 .ucode = &iwl5000_ucode,
33189 .lib = &iwl1000_lib,
33190 .hcmd = &iwl5000_hcmd,
33191diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c
33192--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
33193+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
33194@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
33195 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
33196 };
33197
33198-static struct iwl_ops iwl3945_ops = {
33199+static const struct iwl_ops iwl3945_ops = {
33200 .ucode = &iwl3945_ucode,
33201 .lib = &iwl3945_lib,
33202 .hcmd = &iwl3945_hcmd,
33203diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c
33204--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
33205+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
33206@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
33207 },
33208 };
33209
33210-static struct iwl_ops iwl4965_ops = {
33211+static const struct iwl_ops iwl4965_ops = {
33212 .ucode = &iwl4965_ucode,
33213 .lib = &iwl4965_lib,
33214 .hcmd = &iwl4965_hcmd,
33215diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c
33216--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
33217+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
33218@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
33219 },
33220 };
33221
33222-struct iwl_ops iwl5000_ops = {
33223+const struct iwl_ops iwl5000_ops = {
33224 .ucode = &iwl5000_ucode,
33225 .lib = &iwl5000_lib,
33226 .hcmd = &iwl5000_hcmd,
33227 .utils = &iwl5000_hcmd_utils,
33228 };
33229
33230-static struct iwl_ops iwl5150_ops = {
33231+static const struct iwl_ops iwl5150_ops = {
33232 .ucode = &iwl5000_ucode,
33233 .lib = &iwl5150_lib,
33234 .hcmd = &iwl5000_hcmd,
33235diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c
33236--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
33237+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
33238@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
33239 .calc_rssi = iwl5000_calc_rssi,
33240 };
33241
33242-static struct iwl_ops iwl6000_ops = {
33243+static const struct iwl_ops iwl6000_ops = {
33244 .ucode = &iwl5000_ucode,
33245 .lib = &iwl6000_lib,
33246 .hcmd = &iwl5000_hcmd,
33247diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33248--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
33249+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
33250@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
33251 u8 active_index = 0;
33252 s32 tpt = 0;
33253
33254+ pax_track_stack();
33255+
33256 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33257
33258 if (!ieee80211_is_data(hdr->frame_control) ||
33259@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
33260 u8 valid_tx_ant = 0;
33261 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33262
33263+ pax_track_stack();
33264+
33265 /* Override starting rate (index 0) if needed for debug purposes */
33266 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33267
33268diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33269--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
33270+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
33271@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
33272 int pos = 0;
33273 const size_t bufsz = sizeof(buf);
33274
33275+ pax_track_stack();
33276+
33277 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33278 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33279 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
33280@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33281 const size_t bufsz = sizeof(buf);
33282 ssize_t ret;
33283
33284+ pax_track_stack();
33285+
33286 for (i = 0; i < AC_NUM; i++) {
33287 pos += scnprintf(buf + pos, bufsz - pos,
33288 "\tcw_min\tcw_max\taifsn\ttxop\n");
33289diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h
33290--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
33291+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
33292@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
33293 #endif
33294
33295 #else
33296-#define IWL_DEBUG(__priv, level, fmt, args...)
33297-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33298+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33299+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33300 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33301 void *p, u32 len)
33302 {}
33303diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h
33304--- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
33305+++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
33306@@ -68,7 +68,7 @@ struct iwl_tx_queue;
33307
33308 /* shared structures from iwl-5000.c */
33309 extern struct iwl_mod_params iwl50_mod_params;
33310-extern struct iwl_ops iwl5000_ops;
33311+extern const struct iwl_ops iwl5000_ops;
33312 extern struct iwl_ucode_ops iwl5000_ucode;
33313 extern struct iwl_lib_ops iwl5000_lib;
33314 extern struct iwl_hcmd_ops iwl5000_hcmd;
33315diff -urNp linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c
33316--- linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33317+++ linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
33318@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33319 int buf_len = 512;
33320 size_t len = 0;
33321
33322+ pax_track_stack();
33323+
33324 if (*ppos != 0)
33325 return 0;
33326 if (count < sizeof(buf))
33327diff -urNp linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c
33328--- linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33329+++ linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33330@@ -708,7 +708,7 @@ out_unlock:
33331 struct lbs_debugfs_files {
33332 const char *name;
33333 int perm;
33334- struct file_operations fops;
33335+ const struct file_operations fops;
33336 };
33337
33338 static const struct lbs_debugfs_files debugfs_files[] = {
33339diff -urNp linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c
33340--- linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
33341+++ linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
33342@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
33343
33344 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
33345
33346- if (rts_threshold < 0 || rts_threshold > 2347)
33347+ if (rts_threshold > 2347)
33348 rts_threshold = 2347;
33349
33350 tmp = cpu_to_le32(rts_threshold);
33351diff -urNp linux-2.6.32.42/drivers/oprofile/buffer_sync.c linux-2.6.32.42/drivers/oprofile/buffer_sync.c
33352--- linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
33353+++ linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
33354@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
33355 if (cookie == NO_COOKIE)
33356 offset = pc;
33357 if (cookie == INVALID_COOKIE) {
33358- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33359+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33360 offset = pc;
33361 }
33362 if (cookie != last_cookie) {
33363@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
33364 /* add userspace sample */
33365
33366 if (!mm) {
33367- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33368+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33369 return 0;
33370 }
33371
33372 cookie = lookup_dcookie(mm, s->eip, &offset);
33373
33374 if (cookie == INVALID_COOKIE) {
33375- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33376+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33377 return 0;
33378 }
33379
33380@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
33381 /* ignore backtraces if failed to add a sample */
33382 if (state == sb_bt_start) {
33383 state = sb_bt_ignore;
33384- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33385+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33386 }
33387 }
33388 release_mm(mm);
33389diff -urNp linux-2.6.32.42/drivers/oprofile/event_buffer.c linux-2.6.32.42/drivers/oprofile/event_buffer.c
33390--- linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
33391+++ linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
33392@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33393 }
33394
33395 if (buffer_pos == buffer_size) {
33396- atomic_inc(&oprofile_stats.event_lost_overflow);
33397+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33398 return;
33399 }
33400
33401diff -urNp linux-2.6.32.42/drivers/oprofile/oprof.c linux-2.6.32.42/drivers/oprofile/oprof.c
33402--- linux-2.6.32.42/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
33403+++ linux-2.6.32.42/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
33404@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33405 if (oprofile_ops.switch_events())
33406 return;
33407
33408- atomic_inc(&oprofile_stats.multiplex_counter);
33409+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33410 start_switch_worker();
33411 }
33412
33413diff -urNp linux-2.6.32.42/drivers/oprofile/oprofilefs.c linux-2.6.32.42/drivers/oprofile/oprofilefs.c
33414--- linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
33415+++ linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
33416@@ -187,7 +187,7 @@ static const struct file_operations atom
33417
33418
33419 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33420- char const *name, atomic_t *val)
33421+ char const *name, atomic_unchecked_t *val)
33422 {
33423 struct dentry *d = __oprofilefs_create_file(sb, root, name,
33424 &atomic_ro_fops, 0444);
33425diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.c linux-2.6.32.42/drivers/oprofile/oprofile_stats.c
33426--- linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
33427+++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
33428@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33429 cpu_buf->sample_invalid_eip = 0;
33430 }
33431
33432- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33433- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33434- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33435- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33436- atomic_set(&oprofile_stats.multiplex_counter, 0);
33437+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33438+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33439+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33440+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33441+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33442 }
33443
33444
33445diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.h linux-2.6.32.42/drivers/oprofile/oprofile_stats.h
33446--- linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
33447+++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
33448@@ -13,11 +13,11 @@
33449 #include <asm/atomic.h>
33450
33451 struct oprofile_stat_struct {
33452- atomic_t sample_lost_no_mm;
33453- atomic_t sample_lost_no_mapping;
33454- atomic_t bt_lost_no_mapping;
33455- atomic_t event_lost_overflow;
33456- atomic_t multiplex_counter;
33457+ atomic_unchecked_t sample_lost_no_mm;
33458+ atomic_unchecked_t sample_lost_no_mapping;
33459+ atomic_unchecked_t bt_lost_no_mapping;
33460+ atomic_unchecked_t event_lost_overflow;
33461+ atomic_unchecked_t multiplex_counter;
33462 };
33463
33464 extern struct oprofile_stat_struct oprofile_stats;
33465diff -urNp linux-2.6.32.42/drivers/parisc/pdc_stable.c linux-2.6.32.42/drivers/parisc/pdc_stable.c
33466--- linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
33467+++ linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
33468@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
33469 return ret;
33470 }
33471
33472-static struct sysfs_ops pdcspath_attr_ops = {
33473+static const struct sysfs_ops pdcspath_attr_ops = {
33474 .show = pdcspath_attr_show,
33475 .store = pdcspath_attr_store,
33476 };
33477diff -urNp linux-2.6.32.42/drivers/parport/procfs.c linux-2.6.32.42/drivers/parport/procfs.c
33478--- linux-2.6.32.42/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
33479+++ linux-2.6.32.42/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
33480@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33481
33482 *ppos += len;
33483
33484- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33485+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33486 }
33487
33488 #ifdef CONFIG_PARPORT_1284
33489@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33490
33491 *ppos += len;
33492
33493- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33494+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33495 }
33496 #endif /* IEEE1284.3 support. */
33497
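
Both parport procfs handlers above (do_active_device and do_autoprobe) gain the same guard: len is tested against the size of the on-stack buffer before copy_to_user() may run, so an oversized request returns -EFAULT instead of reading past the buffer. A minimal sketch of that pattern follows, with a userspace stand-in for copy_to_user() and a hypothetical buffer size, not the driver's real code.

#include <stddef.h>
#include <string.h>

/* Userspace stand-in for the kernel's copy_to_user(); returns bytes NOT copied. */
static unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

#define EFAULT 14

/* Mirrors the hardened return statement: reject lengths larger than the
 * source buffer before any copy takes place (short-circuit order matters). */
static int emit_to_user(void *result, size_t len)
{
	char buffer[256];	/* stand-in for the driver's on-stack buffer */

	memset(buffer, 0, sizeof buffer);
	return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
}

int main(void)
{
	char out[16];
	return emit_to_user(out, sizeof out) == 0 ? 0 : 1;
}
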
33498diff -urNp linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c
33499--- linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
33500+++ linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
33501@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
33502 }
33503
33504
33505-static struct acpi_dock_ops acpiphp_dock_ops = {
33506+static const struct acpi_dock_ops acpiphp_dock_ops = {
33507 .handler = handle_hotplug_event_func,
33508 };
33509
33510diff -urNp linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c
33511--- linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
33512+++ linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
33513@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33514
33515 void compaq_nvram_init (void __iomem *rom_start)
33516 {
33517+
33518+#ifndef CONFIG_PAX_KERNEXEC
33519 if (rom_start) {
33520 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33521 }
33522+#endif
33523+
33524 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33525
33526 /* initialize our int15 lock */
33527diff -urNp linux-2.6.32.42/drivers/pci/hotplug/fakephp.c linux-2.6.32.42/drivers/pci/hotplug/fakephp.c
33528--- linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
33529+++ linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
33530@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
33531 }
33532
33533 static struct kobj_type legacy_ktype = {
33534- .sysfs_ops = &(struct sysfs_ops){
33535+ .sysfs_ops = &(const struct sysfs_ops){
33536 .store = legacy_store, .show = legacy_show
33537 },
33538 .release = &legacy_release,
33539diff -urNp linux-2.6.32.42/drivers/pci/intel-iommu.c linux-2.6.32.42/drivers/pci/intel-iommu.c
33540--- linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
33541+++ linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
33542@@ -2643,7 +2643,7 @@ error:
33543 return 0;
33544 }
33545
33546-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
33547+dma_addr_t intel_map_page(struct device *dev, struct page *page,
33548 unsigned long offset, size_t size,
33549 enum dma_data_direction dir,
33550 struct dma_attrs *attrs)
33551@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
33552 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
33553 }
33554
33555-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33556+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33557 size_t size, enum dma_data_direction dir,
33558 struct dma_attrs *attrs)
33559 {
33560@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
33561 }
33562 }
33563
33564-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
33565+void *intel_alloc_coherent(struct device *hwdev, size_t size,
33566 dma_addr_t *dma_handle, gfp_t flags)
33567 {
33568 void *vaddr;
33569@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
33570 return NULL;
33571 }
33572
33573-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33574+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33575 dma_addr_t dma_handle)
33576 {
33577 int order;
33578@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
33579 free_pages((unsigned long)vaddr, order);
33580 }
33581
33582-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33583+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33584 int nelems, enum dma_data_direction dir,
33585 struct dma_attrs *attrs)
33586 {
33587@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
33588 return nelems;
33589 }
33590
33591-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33592+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33593 enum dma_data_direction dir, struct dma_attrs *attrs)
33594 {
33595 int i;
33596@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
33597 return nelems;
33598 }
33599
33600-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33601+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33602 {
33603 return !dma_addr;
33604 }
33605
33606-struct dma_map_ops intel_dma_ops = {
33607+const struct dma_map_ops intel_dma_ops = {
33608 .alloc_coherent = intel_alloc_coherent,
33609 .free_coherent = intel_free_coherent,
33610 .map_sg = intel_map_sg,
33611diff -urNp linux-2.6.32.42/drivers/pci/pcie/aspm.c linux-2.6.32.42/drivers/pci/pcie/aspm.c
33612--- linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
33613+++ linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
33614@@ -27,9 +27,9 @@
33615 #define MODULE_PARAM_PREFIX "pcie_aspm."
33616
33617 /* Note: those are not register definitions */
33618-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33619-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33620-#define ASPM_STATE_L1 (4) /* L1 state */
33621+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33622+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33623+#define ASPM_STATE_L1 (4U) /* L1 state */
33624 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33625 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33626
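
The 1/2/4 literals become 1U/2U/4U, so the ASPM state masks and anything derived from them (ASPM_STATE_L0S, ASPM_STATE_ALL, or a negated mask) stay unsigned; a plausible motivation is avoiding sign extension when such an expression is widened. A small sketch of the difference, using hypothetical FLAG_* names rather than the driver's macros:

#include <stdint.h>
#include <stdio.h>

#define FLAG_A   (1)	/* plain int constant */
#define FLAG_A_U (1U)	/* unsigned, as the patch prefers */

int main(void)
{
	/* ~FLAG_A is a negative int; widening it to 64 bits sign-extends. */
	uint64_t signed_mask   = ~FLAG_A;	/* prints 0xfffffffffffffffe */
	uint64_t unsigned_mask = ~FLAG_A_U;	/* prints 0xfffffffe */

	printf("signed:   %#llx\n", (unsigned long long)signed_mask);
	printf("unsigned: %#llx\n", (unsigned long long)unsigned_mask);
	return 0;
}
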
33627diff -urNp linux-2.6.32.42/drivers/pci/probe.c linux-2.6.32.42/drivers/pci/probe.c
33628--- linux-2.6.32.42/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33629+++ linux-2.6.32.42/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33630@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33631 return ret;
33632 }
33633
33634-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33635+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33636 struct device_attribute *attr,
33637 char *buf)
33638 {
33639 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33640 }
33641
33642-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33643+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33644 struct device_attribute *attr,
33645 char *buf)
33646 {
33647diff -urNp linux-2.6.32.42/drivers/pci/proc.c linux-2.6.32.42/drivers/pci/proc.c
33648--- linux-2.6.32.42/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33649+++ linux-2.6.32.42/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33650@@ -480,7 +480,16 @@ static const struct file_operations proc
33651 static int __init pci_proc_init(void)
33652 {
33653 struct pci_dev *dev = NULL;
33654+
33655+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33656+#ifdef CONFIG_GRKERNSEC_PROC_USER
33657+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33658+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33659+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33660+#endif
33661+#else
33662 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33663+#endif
33664 proc_create("devices", 0, proc_bus_pci_dir,
33665 &proc_bus_pci_dev_operations);
33666 proc_initialized = 1;
33667diff -urNp linux-2.6.32.42/drivers/pci/slot.c linux-2.6.32.42/drivers/pci/slot.c
33668--- linux-2.6.32.42/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33669+++ linux-2.6.32.42/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33670@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33671 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33672 }
33673
33674-static struct sysfs_ops pci_slot_sysfs_ops = {
33675+static const struct sysfs_ops pci_slot_sysfs_ops = {
33676 .show = pci_slot_attr_show,
33677 .store = pci_slot_attr_store,
33678 };
33679diff -urNp linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c
33680--- linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33681+++ linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33682@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33683 return -EFAULT;
33684 }
33685 }
33686- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33687+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33688 if (!buf)
33689 return -ENOMEM;
33690
33691diff -urNp linux-2.6.32.42/drivers/platform/x86/acer-wmi.c linux-2.6.32.42/drivers/platform/x86/acer-wmi.c
33692--- linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33693+++ linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33694@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33695 return 0;
33696 }
33697
33698-static struct backlight_ops acer_bl_ops = {
33699+static const struct backlight_ops acer_bl_ops = {
33700 .get_brightness = read_brightness,
33701 .update_status = update_bl_status,
33702 };
33703diff -urNp linux-2.6.32.42/drivers/platform/x86/asus_acpi.c linux-2.6.32.42/drivers/platform/x86/asus_acpi.c
33704--- linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33705+++ linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33706@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33707 return 0;
33708 }
33709
33710-static struct backlight_ops asus_backlight_data = {
33711+static const struct backlight_ops asus_backlight_data = {
33712 .get_brightness = read_brightness,
33713 .update_status = set_brightness_status,
33714 };
33715diff -urNp linux-2.6.32.42/drivers/platform/x86/asus-laptop.c linux-2.6.32.42/drivers/platform/x86/asus-laptop.c
33716--- linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33717+++ linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33718@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33719 */
33720 static int read_brightness(struct backlight_device *bd);
33721 static int update_bl_status(struct backlight_device *bd);
33722-static struct backlight_ops asusbl_ops = {
33723+static const struct backlight_ops asusbl_ops = {
33724 .get_brightness = read_brightness,
33725 .update_status = update_bl_status,
33726 };
33727diff -urNp linux-2.6.32.42/drivers/platform/x86/compal-laptop.c linux-2.6.32.42/drivers/platform/x86/compal-laptop.c
33728--- linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33729+++ linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33730@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33731 return set_lcd_level(b->props.brightness);
33732 }
33733
33734-static struct backlight_ops compalbl_ops = {
33735+static const struct backlight_ops compalbl_ops = {
33736 .get_brightness = bl_get_brightness,
33737 .update_status = bl_update_status,
33738 };
33739diff -urNp linux-2.6.32.42/drivers/platform/x86/dell-laptop.c linux-2.6.32.42/drivers/platform/x86/dell-laptop.c
33740--- linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33741+++ linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33742@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33743 return buffer.output[1];
33744 }
33745
33746-static struct backlight_ops dell_ops = {
33747+static const struct backlight_ops dell_ops = {
33748 .get_brightness = dell_get_intensity,
33749 .update_status = dell_send_intensity,
33750 };
33751diff -urNp linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c
33752--- linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33753+++ linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33754@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33755 */
33756 static int read_brightness(struct backlight_device *bd);
33757 static int update_bl_status(struct backlight_device *bd);
33758-static struct backlight_ops eeepcbl_ops = {
33759+static const struct backlight_ops eeepcbl_ops = {
33760 .get_brightness = read_brightness,
33761 .update_status = update_bl_status,
33762 };
33763diff -urNp linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c
33764--- linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33765+++ linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33766@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33767 return ret;
33768 }
33769
33770-static struct backlight_ops fujitsubl_ops = {
33771+static const struct backlight_ops fujitsubl_ops = {
33772 .get_brightness = bl_get_brightness,
33773 .update_status = bl_update_status,
33774 };
33775diff -urNp linux-2.6.32.42/drivers/platform/x86/msi-laptop.c linux-2.6.32.42/drivers/platform/x86/msi-laptop.c
33776--- linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33777+++ linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33778@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33779 return set_lcd_level(b->props.brightness);
33780 }
33781
33782-static struct backlight_ops msibl_ops = {
33783+static const struct backlight_ops msibl_ops = {
33784 .get_brightness = bl_get_brightness,
33785 .update_status = bl_update_status,
33786 };
33787diff -urNp linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c
33788--- linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33789+++ linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33790@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33791 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33792 }
33793
33794-static struct backlight_ops pcc_backlight_ops = {
33795+static const struct backlight_ops pcc_backlight_ops = {
33796 .get_brightness = bl_get,
33797 .update_status = bl_set_status,
33798 };
33799diff -urNp linux-2.6.32.42/drivers/platform/x86/sony-laptop.c linux-2.6.32.42/drivers/platform/x86/sony-laptop.c
33800--- linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33801+++ linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33802@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33803 }
33804
33805 static struct backlight_device *sony_backlight_device;
33806-static struct backlight_ops sony_backlight_ops = {
33807+static const struct backlight_ops sony_backlight_ops = {
33808 .update_status = sony_backlight_update_status,
33809 .get_brightness = sony_backlight_get_brightness,
33810 };
33811diff -urNp linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c
33812--- linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33813+++ linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33814@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33815 BACKLIGHT_UPDATE_HOTKEY);
33816 }
33817
33818-static struct backlight_ops ibm_backlight_data = {
33819+static const struct backlight_ops ibm_backlight_data = {
33820 .get_brightness = brightness_get,
33821 .update_status = brightness_update_status,
33822 };
33823diff -urNp linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c
33824--- linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33825+++ linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33826@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33827 return AE_OK;
33828 }
33829
33830-static struct backlight_ops toshiba_backlight_data = {
33831+static const struct backlight_ops toshiba_backlight_data = {
33832 .get_brightness = get_lcd,
33833 .update_status = set_lcd_status,
33834 };
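
The long run of hunks above (sysfs_ops, acpi_dock_ops, dma_map_ops, and every backlight_ops instance from acer-wmi through toshiba_acpi) applies one pattern: operation tables that are only ever read are declared const so they land in read-only data, which the rest of the patch can then protect. A minimal sketch of the pattern with a hypothetical ops table, not any of these drivers' real types:

#include <stdio.h>

/* Hypothetical ops table standing in for backlight_ops / sysfs_ops etc. */
struct demo_ops {
	int (*get_value)(void);
	void (*set_value)(int v);
};

static int demo_get(void) { return 42; }
static void demo_set(int v) { (void)v; }

/* const: the function-pointer table is placed in read-only data and
 * cannot be retargeted at runtime, which is the point of the sweep. */
static const struct demo_ops demo_ops_table = {
	.get_value = demo_get,
	.set_value = demo_set,
};

int main(void)
{
	printf("%d\n", demo_ops_table.get_value());
	return 0;
}
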
33835diff -urNp linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c
33836--- linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33837+++ linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33838@@ -60,7 +60,7 @@ do { \
33839 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33840 } while(0)
33841
33842-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33843+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33844 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33845
33846 /*
33847@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33848
33849 cpu = get_cpu();
33850 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33851+
33852+ pax_open_kernel();
33853 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33854+ pax_close_kernel();
33855
33856 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33857 spin_lock_irqsave(&pnp_bios_lock, flags);
33858@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33859 :"memory");
33860 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33861
33862+ pax_open_kernel();
33863 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33864+ pax_close_kernel();
33865+
33866 put_cpu();
33867
33868 /* If we get here and this is set then the PnP BIOS faulted on us. */
33869@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33870 return status;
33871 }
33872
33873-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33874+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33875 {
33876 int i;
33877
33878@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33879 pnp_bios_callpoint.offset = header->fields.pm16offset;
33880 pnp_bios_callpoint.segment = PNP_CS16;
33881
33882+ pax_open_kernel();
33883+
33884 for_each_possible_cpu(i) {
33885 struct desc_struct *gdt = get_cpu_gdt_table(i);
33886 if (!gdt)
33887@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33888 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33889 (unsigned long)__va(header->fields.pm16dseg));
33890 }
33891+
33892+ pax_close_kernel();
33893 }
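
call_pnp_bios() and pnpbios_calls_init() both write into the per-CPU GDT, which KERNEXEC keeps read-only, so the patch brackets those writes with pax_open_kernel()/pax_close_kernel(), the PaX helpers that temporarily allow writes to otherwise read-only kernel data. The sketch below only illustrates the bracketing discipline: the two helpers are no-op stand-ins for the real PaX implementations, and protected_entry is a stand-in for a descriptor slot.

/* No-op stand-ins: in a PaX kernel these open and close a window in
 * which read-only kernel data may be written. */
static void pax_open_kernel(void) { }
static void pax_close_kernel(void) { }

/* Stand-in for a normally read-only descriptor slot. */
static unsigned long protected_entry;

static void update_protected_entry(unsigned long new_value)
{
	pax_open_kernel();
	protected_entry = new_value;	/* the only write, kept inside the window */
	pax_close_kernel();
}

int main(void)
{
	update_protected_entry(0x4093);
	return protected_entry == 0x4093 ? 0 : 1;
}
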
33894diff -urNp linux-2.6.32.42/drivers/pnp/resource.c linux-2.6.32.42/drivers/pnp/resource.c
33895--- linux-2.6.32.42/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33896+++ linux-2.6.32.42/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33897@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33898 return 1;
33899
33900 /* check if the resource is valid */
33901- if (*irq < 0 || *irq > 15)
33902+ if (*irq > 15)
33903 return 0;
33904
33905 /* check if the resource is reserved */
33906@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33907 return 1;
33908
33909 /* check if the resource is valid */
33910- if (*dma < 0 || *dma == 4 || *dma > 7)
33911+ if (*dma == 4 || *dma > 7)
33912 return 0;
33913
33914 /* check if the resource is reserved */
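
Both pnp_check_irq() and pnp_check_dma() drop the `*irq < 0` / `*dma < 0` half of their range checks; that removal only makes sense if the pointed-to values are unsigned, in which case the comparison is always false, only the upper-bound test does any work, and compilers typically warn about the dead comparison. A small sketch of the same simplification with a hypothetical validator:

#include <stdbool.h>

/* Hypothetical validator mirroring the simplified IRQ range test:
 * with an unsigned value, "< 0" can never be true, so only the
 * upper bound needs checking. */
static bool irq_in_range(unsigned int irq)
{
	return irq <= 15;
}

int main(void)
{
	return (irq_in_range(5) && !irq_in_range(16)) ? 0 : 1;
}
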
33915diff -urNp linux-2.6.32.42/drivers/rtc/rtc-dev.c linux-2.6.32.42/drivers/rtc/rtc-dev.c
33916--- linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33917+++ linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33918@@ -14,6 +14,7 @@
33919 #include <linux/module.h>
33920 #include <linux/rtc.h>
33921 #include <linux/sched.h>
33922+#include <linux/grsecurity.h>
33923 #include "rtc-core.h"
33924
33925 static dev_t rtc_devt;
33926@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33927 if (copy_from_user(&tm, uarg, sizeof(tm)))
33928 return -EFAULT;
33929
33930+ gr_log_timechange();
33931+
33932 return rtc_set_time(rtc, &tm);
33933
33934 case RTC_PIE_ON:
33935diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.c linux-2.6.32.42/drivers/s390/cio/qdio_perf.c
33936--- linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33937+++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33938@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33939 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33940 {
33941 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33942- (long)atomic_long_read(&perf_stats.qdio_int));
33943+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33944 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33945- (long)atomic_long_read(&perf_stats.pci_int));
33946+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33947 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33948- (long)atomic_long_read(&perf_stats.thin_int));
33949+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33950 seq_printf(m, "\n");
33951 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33952- (long)atomic_long_read(&perf_stats.tasklet_inbound));
33953+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33954 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33955- (long)atomic_long_read(&perf_stats.tasklet_outbound));
33956+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33957 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33958- (long)atomic_long_read(&perf_stats.tasklet_thinint),
33959- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33960+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33961+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33962 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33963- (long)atomic_long_read(&perf_stats.thinint_inbound),
33964- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33965+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33966+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33967 seq_printf(m, "\n");
33968 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33969- (long)atomic_long_read(&perf_stats.siga_in));
33970+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33971 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33972- (long)atomic_long_read(&perf_stats.siga_out));
33973+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33974 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33975- (long)atomic_long_read(&perf_stats.siga_sync));
33976+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33977 seq_printf(m, "\n");
33978 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33979- (long)atomic_long_read(&perf_stats.inbound_handler));
33980+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33981 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33982- (long)atomic_long_read(&perf_stats.outbound_handler));
33983+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33984 seq_printf(m, "\n");
33985 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33986- (long)atomic_long_read(&perf_stats.fast_requeue));
33987+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33988 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33989- (long)atomic_long_read(&perf_stats.outbound_target_full));
33990+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33991 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33992- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33993+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33994 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33995- (long)atomic_long_read(&perf_stats.debug_stop_polling));
33996+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33997 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33998- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33999+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
34000 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
34001- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
34002- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
34003+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
34004+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
34005 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
34006- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
34007- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
34008+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
34009+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
34010 seq_printf(m, "\n");
34011 return 0;
34012 }
34013diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.h linux-2.6.32.42/drivers/s390/cio/qdio_perf.h
34014--- linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
34015+++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
34016@@ -13,46 +13,46 @@
34017
34018 struct qdio_perf_stats {
34019 /* interrupt handler calls */
34020- atomic_long_t qdio_int;
34021- atomic_long_t pci_int;
34022- atomic_long_t thin_int;
34023+ atomic_long_unchecked_t qdio_int;
34024+ atomic_long_unchecked_t pci_int;
34025+ atomic_long_unchecked_t thin_int;
34026
34027 /* tasklet runs */
34028- atomic_long_t tasklet_inbound;
34029- atomic_long_t tasklet_outbound;
34030- atomic_long_t tasklet_thinint;
34031- atomic_long_t tasklet_thinint_loop;
34032- atomic_long_t thinint_inbound;
34033- atomic_long_t thinint_inbound_loop;
34034- atomic_long_t thinint_inbound_loop2;
34035+ atomic_long_unchecked_t tasklet_inbound;
34036+ atomic_long_unchecked_t tasklet_outbound;
34037+ atomic_long_unchecked_t tasklet_thinint;
34038+ atomic_long_unchecked_t tasklet_thinint_loop;
34039+ atomic_long_unchecked_t thinint_inbound;
34040+ atomic_long_unchecked_t thinint_inbound_loop;
34041+ atomic_long_unchecked_t thinint_inbound_loop2;
34042
34043 /* signal adapter calls */
34044- atomic_long_t siga_out;
34045- atomic_long_t siga_in;
34046- atomic_long_t siga_sync;
34047+ atomic_long_unchecked_t siga_out;
34048+ atomic_long_unchecked_t siga_in;
34049+ atomic_long_unchecked_t siga_sync;
34050
34051 /* misc */
34052- atomic_long_t inbound_handler;
34053- atomic_long_t outbound_handler;
34054- atomic_long_t fast_requeue;
34055- atomic_long_t outbound_target_full;
34056+ atomic_long_unchecked_t inbound_handler;
34057+ atomic_long_unchecked_t outbound_handler;
34058+ atomic_long_unchecked_t fast_requeue;
34059+ atomic_long_unchecked_t outbound_target_full;
34060
34061 /* for debugging */
34062- atomic_long_t debug_tl_out_timer;
34063- atomic_long_t debug_stop_polling;
34064- atomic_long_t debug_eqbs_all;
34065- atomic_long_t debug_eqbs_incomplete;
34066- atomic_long_t debug_sqbs_all;
34067- atomic_long_t debug_sqbs_incomplete;
34068+ atomic_long_unchecked_t debug_tl_out_timer;
34069+ atomic_long_unchecked_t debug_stop_polling;
34070+ atomic_long_unchecked_t debug_eqbs_all;
34071+ atomic_long_unchecked_t debug_eqbs_incomplete;
34072+ atomic_long_unchecked_t debug_sqbs_all;
34073+ atomic_long_unchecked_t debug_sqbs_incomplete;
34074 };
34075
34076 extern struct qdio_perf_stats perf_stats;
34077 extern int qdio_performance_stats;
34078
34079-static inline void qdio_perf_stat_inc(atomic_long_t *count)
34080+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
34081 {
34082 if (qdio_performance_stats)
34083- atomic_long_inc(count);
34084+ atomic_long_inc_unchecked(count);
34085 }
34086
34087 int qdio_setup_perf_stats(void);
34088diff -urNp linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c
34089--- linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
34090+++ linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
34091@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
34092 u32 actual_fibsize64, actual_fibsize = 0;
34093 int i;
34094
34095+ pax_track_stack();
34096
34097 if (dev->in_reset) {
34098 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
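
aac_send_raw_srb() is the first of many SCSI-side functions in this section (BusLogic, dpt_i2o, eata, libfcoe, gdth, lpfc, megaraid, osd, scsi_debug) that gain a pax_track_stack() call right after their local declarations; the common trait is a large on-stack buffer, and the marker lets PaX's stack-usage tracking account for such deep frames. The sketch below only shows where the call sits; pax_track_stack() is a no-op stand-in here, not the PaX implementation, and the buffer size is illustrative.

#include <string.h>

/* No-op stand-in for the PaX helper that records deep stack usage. */
static void pax_track_stack(void) { }

#define CMND_SIZE 16	/* illustrative stand-in for the driver's buffer size */

static int send_command_sketch(void)
{
	char cmnd[CMND_SIZE];	/* the large local buffer is why the marker is added */

	pax_track_stack();	/* placed immediately after the declarations, as in the patch */

	memset(cmnd, 0xff, CMND_SIZE);
	return cmnd[0] == (char)0xff ? 0 : 1;
}

int main(void)
{
	return send_command_sketch();
}
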
34099diff -urNp linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c
34100--- linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
34101+++ linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
34102@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
34103 flash_error_table[i].reason);
34104 }
34105
34106-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
34107+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
34108 asd_show_update_bios, asd_store_update_bios);
34109
34110 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
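
The update_bios attribute above drops S_IWUGO in favour of S_IWUSR: the sysfs file stops being world-writable, only the owner (root) may write it, and read access is unchanged. A tiny sketch of what the two mode expressions evaluate to, spelled out with the standard <sys/stat.h> bits rather than the kernel shorthands:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO|S_IWUGO versus S_IRUGO|S_IWUSR, expanded. */
	mode_t old_mode = (S_IRUSR | S_IRGRP | S_IROTH) | (S_IWUSR | S_IWGRP | S_IWOTH);
	mode_t new_mode = (S_IRUSR | S_IRGRP | S_IROTH) | S_IWUSR;

	printf("old: %04o new: %04o\n", (unsigned)old_mode, (unsigned)new_mode); /* 0666 vs 0644 */
	return 0;
}
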
34111diff -urNp linux-2.6.32.42/drivers/scsi/BusLogic.c linux-2.6.32.42/drivers/scsi/BusLogic.c
34112--- linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
34113+++ linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
34114@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
34115 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
34116 *PrototypeHostAdapter)
34117 {
34118+ pax_track_stack();
34119+
34120 /*
34121 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
34122 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
34123diff -urNp linux-2.6.32.42/drivers/scsi/dpt_i2o.c linux-2.6.32.42/drivers/scsi/dpt_i2o.c
34124--- linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
34125+++ linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
34126@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
34127 dma_addr_t addr;
34128 ulong flags = 0;
34129
34130+ pax_track_stack();
34131+
34132 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
34133 // get user msg size in u32s
34134 if(get_user(size, &user_msg[0])){
34135@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
34136 s32 rcode;
34137 dma_addr_t addr;
34138
34139+ pax_track_stack();
34140+
34141 memset(msg, 0 , sizeof(msg));
34142 len = scsi_bufflen(cmd);
34143 direction = 0x00000000;
34144diff -urNp linux-2.6.32.42/drivers/scsi/eata.c linux-2.6.32.42/drivers/scsi/eata.c
34145--- linux-2.6.32.42/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
34146+++ linux-2.6.32.42/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
34147@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
34148 struct hostdata *ha;
34149 char name[16];
34150
34151+ pax_track_stack();
34152+
34153 sprintf(name, "%s%d", driver_name, j);
34154
34155 if (!request_region(port_base, REGION_SIZE, driver_name)) {
34156diff -urNp linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c
34157--- linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
34158+++ linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
34159@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
34160 size_t rlen;
34161 size_t dlen;
34162
34163+ pax_track_stack();
34164+
34165 fiph = (struct fip_header *)skb->data;
34166 sub = fiph->fip_subcode;
34167 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
34168diff -urNp linux-2.6.32.42/drivers/scsi/gdth.c linux-2.6.32.42/drivers/scsi/gdth.c
34169--- linux-2.6.32.42/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
34170+++ linux-2.6.32.42/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
34171@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
34172 ulong flags;
34173 gdth_ha_str *ha;
34174
34175+ pax_track_stack();
34176+
34177 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
34178 return -EFAULT;
34179 ha = gdth_find_ha(ldrv.ionode);
34180@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
34181 gdth_ha_str *ha;
34182 int rval;
34183
34184+ pax_track_stack();
34185+
34186 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
34187 res.number >= MAX_HDRIVES)
34188 return -EFAULT;
34189@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
34190 gdth_ha_str *ha;
34191 int rval;
34192
34193+ pax_track_stack();
34194+
34195 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
34196 return -EFAULT;
34197 ha = gdth_find_ha(gen.ionode);
34198@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
34199 int i;
34200 gdth_cmd_str gdtcmd;
34201 char cmnd[MAX_COMMAND_SIZE];
34202+
34203+ pax_track_stack();
34204+
34205 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
34206
34207 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
34208diff -urNp linux-2.6.32.42/drivers/scsi/gdth_proc.c linux-2.6.32.42/drivers/scsi/gdth_proc.c
34209--- linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
34210+++ linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
34211@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
34212 ulong64 paddr;
34213
34214 char cmnd[MAX_COMMAND_SIZE];
34215+
34216+ pax_track_stack();
34217+
34218 memset(cmnd, 0xff, 12);
34219 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
34220
34221@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
34222 gdth_hget_str *phg;
34223 char cmnd[MAX_COMMAND_SIZE];
34224
34225+ pax_track_stack();
34226+
34227 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
34228 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
34229 if (!gdtcmd || !estr)
34230diff -urNp linux-2.6.32.42/drivers/scsi/hosts.c linux-2.6.32.42/drivers/scsi/hosts.c
34231--- linux-2.6.32.42/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
34232+++ linux-2.6.32.42/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
34233@@ -40,7 +40,7 @@
34234 #include "scsi_logging.h"
34235
34236
34237-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34238+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34239
34240
34241 static void scsi_host_cls_release(struct device *dev)
34242@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
34243 * subtract one because we increment first then return, but we need to
34244 * know what the next host number was before increment
34245 */
34246- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34247+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34248 shost->dma_channel = 0xff;
34249
34250 /* These three are default values which can be overridden */
34251diff -urNp linux-2.6.32.42/drivers/scsi/ipr.c linux-2.6.32.42/drivers/scsi/ipr.c
34252--- linux-2.6.32.42/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
34253+++ linux-2.6.32.42/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
34254@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
34255 return true;
34256 }
34257
34258-static struct ata_port_operations ipr_sata_ops = {
34259+static const struct ata_port_operations ipr_sata_ops = {
34260 .phy_reset = ipr_ata_phy_reset,
34261 .hardreset = ipr_sata_reset,
34262 .post_internal_cmd = ipr_ata_post_internal,
34263diff -urNp linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c
34264--- linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
34265+++ linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
34266@@ -86,12 +86,12 @@ struct fc_exch_mgr {
34267 * all together if not used XXX
34268 */
34269 struct {
34270- atomic_t no_free_exch;
34271- atomic_t no_free_exch_xid;
34272- atomic_t xid_not_found;
34273- atomic_t xid_busy;
34274- atomic_t seq_not_found;
34275- atomic_t non_bls_resp;
34276+ atomic_unchecked_t no_free_exch;
34277+ atomic_unchecked_t no_free_exch_xid;
34278+ atomic_unchecked_t xid_not_found;
34279+ atomic_unchecked_t xid_busy;
34280+ atomic_unchecked_t seq_not_found;
34281+ atomic_unchecked_t non_bls_resp;
34282 } stats;
34283 };
34284 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
34285@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
34286 /* allocate memory for exchange */
34287 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34288 if (!ep) {
34289- atomic_inc(&mp->stats.no_free_exch);
34290+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34291 goto out;
34292 }
34293 memset(ep, 0, sizeof(*ep));
34294@@ -557,7 +557,7 @@ out:
34295 return ep;
34296 err:
34297 spin_unlock_bh(&pool->lock);
34298- atomic_inc(&mp->stats.no_free_exch_xid);
34299+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34300 mempool_free(ep, mp->ep_pool);
34301 return NULL;
34302 }
34303@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34304 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34305 ep = fc_exch_find(mp, xid);
34306 if (!ep) {
34307- atomic_inc(&mp->stats.xid_not_found);
34308+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34309 reject = FC_RJT_OX_ID;
34310 goto out;
34311 }
34312@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34313 ep = fc_exch_find(mp, xid);
34314 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34315 if (ep) {
34316- atomic_inc(&mp->stats.xid_busy);
34317+ atomic_inc_unchecked(&mp->stats.xid_busy);
34318 reject = FC_RJT_RX_ID;
34319 goto rel;
34320 }
34321@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34322 }
34323 xid = ep->xid; /* get our XID */
34324 } else if (!ep) {
34325- atomic_inc(&mp->stats.xid_not_found);
34326+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34327 reject = FC_RJT_RX_ID; /* XID not found */
34328 goto out;
34329 }
34330@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34331 } else {
34332 sp = &ep->seq;
34333 if (sp->id != fh->fh_seq_id) {
34334- atomic_inc(&mp->stats.seq_not_found);
34335+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34336 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
34337 goto rel;
34338 }
34339@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
34340
34341 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34342 if (!ep) {
34343- atomic_inc(&mp->stats.xid_not_found);
34344+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34345 goto out;
34346 }
34347 if (ep->esb_stat & ESB_ST_COMPLETE) {
34348- atomic_inc(&mp->stats.xid_not_found);
34349+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34350 goto out;
34351 }
34352 if (ep->rxid == FC_XID_UNKNOWN)
34353 ep->rxid = ntohs(fh->fh_rx_id);
34354 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34355- atomic_inc(&mp->stats.xid_not_found);
34356+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34357 goto rel;
34358 }
34359 if (ep->did != ntoh24(fh->fh_s_id) &&
34360 ep->did != FC_FID_FLOGI) {
34361- atomic_inc(&mp->stats.xid_not_found);
34362+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34363 goto rel;
34364 }
34365 sof = fr_sof(fp);
34366@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
34367 } else {
34368 sp = &ep->seq;
34369 if (sp->id != fh->fh_seq_id) {
34370- atomic_inc(&mp->stats.seq_not_found);
34371+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34372 goto rel;
34373 }
34374 }
34375@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
34376 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34377
34378 if (!sp)
34379- atomic_inc(&mp->stats.xid_not_found);
34380+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34381 else
34382- atomic_inc(&mp->stats.non_bls_resp);
34383+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34384
34385 fc_frame_free(fp);
34386 }
34387diff -urNp linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c
34388--- linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
34389+++ linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
34390@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
34391 }
34392 }
34393
34394-static struct ata_port_operations sas_sata_ops = {
34395+static const struct ata_port_operations sas_sata_ops = {
34396 .phy_reset = sas_ata_phy_reset,
34397 .post_internal_cmd = sas_ata_post_internal,
34398 .qc_defer = ata_std_qc_defer,
34399diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c
34400--- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
34401+++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
34402@@ -124,7 +124,7 @@ struct lpfc_debug {
34403 int len;
34404 };
34405
34406-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34407+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34408 static unsigned long lpfc_debugfs_start_time = 0L;
34409
34410 /**
34411@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34412 lpfc_debugfs_enable = 0;
34413
34414 len = 0;
34415- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34416+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34417 (lpfc_debugfs_max_disc_trc - 1);
34418 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34419 dtp = vport->disc_trc + i;
34420@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34421 lpfc_debugfs_enable = 0;
34422
34423 len = 0;
34424- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34425+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34426 (lpfc_debugfs_max_slow_ring_trc - 1);
34427 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34428 dtp = phba->slow_ring_trc + i;
34429@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
34430 uint32_t *ptr;
34431 char buffer[1024];
34432
34433+ pax_track_stack();
34434+
34435 off = 0;
34436 spin_lock_irq(&phba->hbalock);
34437
34438@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34439 !vport || !vport->disc_trc)
34440 return;
34441
34442- index = atomic_inc_return(&vport->disc_trc_cnt) &
34443+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34444 (lpfc_debugfs_max_disc_trc - 1);
34445 dtp = vport->disc_trc + index;
34446 dtp->fmt = fmt;
34447 dtp->data1 = data1;
34448 dtp->data2 = data2;
34449 dtp->data3 = data3;
34450- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34451+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34452 dtp->jif = jiffies;
34453 #endif
34454 return;
34455@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34456 !phba || !phba->slow_ring_trc)
34457 return;
34458
34459- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34460+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34461 (lpfc_debugfs_max_slow_ring_trc - 1);
34462 dtp = phba->slow_ring_trc + index;
34463 dtp->fmt = fmt;
34464 dtp->data1 = data1;
34465 dtp->data2 = data2;
34466 dtp->data3 = data3;
34467- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34468+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34469 dtp->jif = jiffies;
34470 #endif
34471 return;
34472@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34473 "slow_ring buffer\n");
34474 goto debug_failed;
34475 }
34476- atomic_set(&phba->slow_ring_trc_cnt, 0);
34477+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34478 memset(phba->slow_ring_trc, 0,
34479 (sizeof(struct lpfc_debugfs_trc) *
34480 lpfc_debugfs_max_slow_ring_trc));
34481@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34482 "buffer\n");
34483 goto debug_failed;
34484 }
34485- atomic_set(&vport->disc_trc_cnt, 0);
34486+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34487
34488 snprintf(name, sizeof(name), "discovery_trace");
34489 vport->debug_disc_trc =
34490diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h
34491--- linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
34492+++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
34493@@ -400,7 +400,7 @@ struct lpfc_vport {
34494 struct dentry *debug_nodelist;
34495 struct dentry *vport_debugfs_root;
34496 struct lpfc_debugfs_trc *disc_trc;
34497- atomic_t disc_trc_cnt;
34498+ atomic_unchecked_t disc_trc_cnt;
34499 #endif
34500 uint8_t stat_data_enabled;
34501 uint8_t stat_data_blocked;
34502@@ -725,8 +725,8 @@ struct lpfc_hba {
34503 struct timer_list fabric_block_timer;
34504 unsigned long bit_flags;
34505 #define FABRIC_COMANDS_BLOCKED 0
34506- atomic_t num_rsrc_err;
34507- atomic_t num_cmd_success;
34508+ atomic_unchecked_t num_rsrc_err;
34509+ atomic_unchecked_t num_cmd_success;
34510 unsigned long last_rsrc_error_time;
34511 unsigned long last_ramp_down_time;
34512 unsigned long last_ramp_up_time;
34513@@ -740,7 +740,7 @@ struct lpfc_hba {
34514 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34515 struct dentry *debug_slow_ring_trc;
34516 struct lpfc_debugfs_trc *slow_ring_trc;
34517- atomic_t slow_ring_trc_cnt;
34518+ atomic_unchecked_t slow_ring_trc_cnt;
34519 #endif
34520
34521 /* Used for deferred freeing of ELS data buffers */
34522diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c
34523--- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
34524+++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
34525@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34526 uint32_t evt_posted;
34527
34528 spin_lock_irqsave(&phba->hbalock, flags);
34529- atomic_inc(&phba->num_rsrc_err);
34530+ atomic_inc_unchecked(&phba->num_rsrc_err);
34531 phba->last_rsrc_error_time = jiffies;
34532
34533 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34534@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34535 unsigned long flags;
34536 struct lpfc_hba *phba = vport->phba;
34537 uint32_t evt_posted;
34538- atomic_inc(&phba->num_cmd_success);
34539+ atomic_inc_unchecked(&phba->num_cmd_success);
34540
34541 if (vport->cfg_lun_queue_depth <= queue_depth)
34542 return;
34543@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34544 int i;
34545 struct lpfc_rport_data *rdata;
34546
34547- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34548- num_cmd_success = atomic_read(&phba->num_cmd_success);
34549+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34550+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34551
34552 vports = lpfc_create_vport_work_array(phba);
34553 if (vports != NULL)
34554@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34555 }
34556 }
34557 lpfc_destroy_vport_work_array(phba, vports);
34558- atomic_set(&phba->num_rsrc_err, 0);
34559- atomic_set(&phba->num_cmd_success, 0);
34560+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34561+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34562 }
34563
34564 /**
34565@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34566 }
34567 }
34568 lpfc_destroy_vport_work_array(phba, vports);
34569- atomic_set(&phba->num_rsrc_err, 0);
34570- atomic_set(&phba->num_cmd_success, 0);
34571+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34572+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34573 }
34574
34575 /**
34576diff -urNp linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c
34577--- linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
34578+++ linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
34579@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34580 int rval;
34581 int i;
34582
34583+ pax_track_stack();
34584+
34585 // Allocate memory for the base list of scb for management module.
34586 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34587
34588diff -urNp linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c
34589--- linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
34590+++ linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
34591@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
34592 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34593 int ret;
34594
34595+ pax_track_stack();
34596+
34597 or = osd_start_request(od, GFP_KERNEL);
34598 if (!or)
34599 return -ENOMEM;
34600diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.c linux-2.6.32.42/drivers/scsi/pmcraid.c
34601--- linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
34602+++ linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
34603@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
34604 res->scsi_dev = scsi_dev;
34605 scsi_dev->hostdata = res;
34606 res->change_detected = 0;
34607- atomic_set(&res->read_failures, 0);
34608- atomic_set(&res->write_failures, 0);
34609+ atomic_set_unchecked(&res->read_failures, 0);
34610+ atomic_set_unchecked(&res->write_failures, 0);
34611 rc = 0;
34612 }
34613 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34614@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
34615
34616 /* If this was a SCSI read/write command keep count of errors */
34617 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34618- atomic_inc(&res->read_failures);
34619+ atomic_inc_unchecked(&res->read_failures);
34620 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34621- atomic_inc(&res->write_failures);
34622+ atomic_inc_unchecked(&res->write_failures);
34623
34624 if (!RES_IS_GSCSI(res->cfg_entry) &&
34625 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34626@@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34627
34628 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34629 /* add resources only after host is added into system */
34630- if (!atomic_read(&pinstance->expose_resources))
34631+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34632 return;
34633
34634 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34635@@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34636 init_waitqueue_head(&pinstance->reset_wait_q);
34637
34638 atomic_set(&pinstance->outstanding_cmds, 0);
34639- atomic_set(&pinstance->expose_resources, 0);
34640+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34641
34642 INIT_LIST_HEAD(&pinstance->free_res_q);
34643 INIT_LIST_HEAD(&pinstance->used_res_q);
34644@@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34645 /* Schedule worker thread to handle CCN and take care of adding and
34646 * removing devices to OS
34647 */
34648- atomic_set(&pinstance->expose_resources, 1);
34649+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34650 schedule_work(&pinstance->worker_q);
34651 return rc;
34652
34653diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.h linux-2.6.32.42/drivers/scsi/pmcraid.h
34654--- linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34655+++ linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34656@@ -690,7 +690,7 @@ struct pmcraid_instance {
34657 atomic_t outstanding_cmds;
34658
34659 /* should add/delete resources to mid-layer now ?*/
34660- atomic_t expose_resources;
34661+ atomic_unchecked_t expose_resources;
34662
34663 /* Tasklet to handle deferred processing */
34664 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34665@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34666 struct list_head queue; /* link to "to be exposed" resources */
34667 struct pmcraid_config_table_entry cfg_entry;
34668 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34669- atomic_t read_failures; /* count of failed READ commands */
34670- atomic_t write_failures; /* count of failed WRITE commands */
34671+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34672+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34673
34674 /* To indicate add/delete/modify during CCN */
34675 u8 change_detected;
34676diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h
34677--- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34678+++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34679@@ -240,7 +240,7 @@ struct ddb_entry {
34680 atomic_t retry_relogin_timer; /* Min Time between relogins
34681 * (4000 only) */
34682 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34683- atomic_t relogin_retry_count; /* Num of times relogin has been
34684+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34685 * retried */
34686
34687 uint16_t port;
34688diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c
34689--- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34690+++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34691@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34692 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34693 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34694 atomic_set(&ddb_entry->relogin_timer, 0);
34695- atomic_set(&ddb_entry->relogin_retry_count, 0);
34696+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34697 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34698 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34699 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34700@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34701 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34702 atomic_set(&ddb_entry->port_down_timer,
34703 ha->port_down_retry_count);
34704- atomic_set(&ddb_entry->relogin_retry_count, 0);
34705+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34706 atomic_set(&ddb_entry->relogin_timer, 0);
34707 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34708 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34709diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c
34710--- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34711+++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34712@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34713 ddb_entry->fw_ddb_device_state ==
34714 DDB_DS_SESSION_FAILED) {
34715 /* Reset retry relogin timer */
34716- atomic_inc(&ddb_entry->relogin_retry_count);
34717+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34718 DEBUG2(printk("scsi%ld: index[%d] relogin"
34719 " timed out-retrying"
34720 " relogin (%d)\n",
34721 ha->host_no,
34722 ddb_entry->fw_ddb_index,
34723- atomic_read(&ddb_entry->
34724+ atomic_read_unchecked(&ddb_entry->
34725 relogin_retry_count))
34726 );
34727 start_dpc++;
34728diff -urNp linux-2.6.32.42/drivers/scsi/scsi.c linux-2.6.32.42/drivers/scsi/scsi.c
34729--- linux-2.6.32.42/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34730+++ linux-2.6.32.42/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34731@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34732 unsigned long timeout;
34733 int rtn = 0;
34734
34735- atomic_inc(&cmd->device->iorequest_cnt);
34736+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34737
34738 /* check if the device is still usable */
34739 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34740diff -urNp linux-2.6.32.42/drivers/scsi/scsi_debug.c linux-2.6.32.42/drivers/scsi/scsi_debug.c
34741--- linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34742+++ linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34743@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34744 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34745 unsigned char *cmd = (unsigned char *)scp->cmnd;
34746
34747+ pax_track_stack();
34748+
34749 if ((errsts = check_readiness(scp, 1, devip)))
34750 return errsts;
34751 memset(arr, 0, sizeof(arr));
34752@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34753 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34754 unsigned char *cmd = (unsigned char *)scp->cmnd;
34755
34756+ pax_track_stack();
34757+
34758 if ((errsts = check_readiness(scp, 1, devip)))
34759 return errsts;
34760 memset(arr, 0, sizeof(arr));
34761diff -urNp linux-2.6.32.42/drivers/scsi/scsi_lib.c linux-2.6.32.42/drivers/scsi/scsi_lib.c
34762--- linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34763+++ linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34764@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34765
34766 scsi_init_cmd_errh(cmd);
34767 cmd->result = DID_NO_CONNECT << 16;
34768- atomic_inc(&cmd->device->iorequest_cnt);
34769+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34770
34771 /*
34772 * SCSI request completion path will do scsi_device_unbusy(),
34773@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34774 */
34775 cmd->serial_number = 0;
34776
34777- atomic_inc(&cmd->device->iodone_cnt);
34778+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34779 if (cmd->result)
34780- atomic_inc(&cmd->device->ioerr_cnt);
34781+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34782
34783 disposition = scsi_decide_disposition(cmd);
34784 if (disposition != SUCCESS &&
34785diff -urNp linux-2.6.32.42/drivers/scsi/scsi_sysfs.c linux-2.6.32.42/drivers/scsi/scsi_sysfs.c
34786--- linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34787+++ linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34788@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34789 char *buf) \
34790 { \
34791 struct scsi_device *sdev = to_scsi_device(dev); \
34792- unsigned long long count = atomic_read(&sdev->field); \
34793+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34794 return snprintf(buf, 20, "0x%llx\n", count); \
34795 } \
34796 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34797diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c
34798--- linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34799+++ linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34800@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34801 * Netlink Infrastructure
34802 */
34803
34804-static atomic_t fc_event_seq;
34805+static atomic_unchecked_t fc_event_seq;
34806
34807 /**
34808 * fc_get_event_number - Obtain the next sequential FC event number
34809@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34810 u32
34811 fc_get_event_number(void)
34812 {
34813- return atomic_add_return(1, &fc_event_seq);
34814+ return atomic_add_return_unchecked(1, &fc_event_seq);
34815 }
34816 EXPORT_SYMBOL(fc_get_event_number);
34817
34818@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34819 {
34820 int error;
34821
34822- atomic_set(&fc_event_seq, 0);
34823+ atomic_set_unchecked(&fc_event_seq, 0);
34824
34825 error = transport_class_register(&fc_host_class);
34826 if (error)
34827diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c
34828--- linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34829+++ linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34830@@ -81,7 +81,7 @@ struct iscsi_internal {
34831 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34832 };
34833
34834-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34835+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34836 static struct workqueue_struct *iscsi_eh_timer_workq;
34837
34838 /*
34839@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34840 int err;
34841
34842 ihost = shost->shost_data;
34843- session->sid = atomic_add_return(1, &iscsi_session_nr);
34844+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34845
34846 if (id == ISCSI_MAX_TARGET) {
34847 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34848@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34849 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34850 ISCSI_TRANSPORT_VERSION);
34851
34852- atomic_set(&iscsi_session_nr, 0);
34853+ atomic_set_unchecked(&iscsi_session_nr, 0);
34854
34855 err = class_register(&iscsi_transport_class);
34856 if (err)
34857diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c
34858--- linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34859+++ linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34860@@ -33,7 +33,7 @@
34861 #include "scsi_transport_srp_internal.h"
34862
34863 struct srp_host_attrs {
34864- atomic_t next_port_id;
34865+ atomic_unchecked_t next_port_id;
34866 };
34867 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34868
34869@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34870 struct Scsi_Host *shost = dev_to_shost(dev);
34871 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34872
34873- atomic_set(&srp_host->next_port_id, 0);
34874+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34875 return 0;
34876 }
34877
34878@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34879 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34880 rport->roles = ids->roles;
34881
34882- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34883+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34884 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34885
34886 transport_setup_device(&rport->dev);
34887diff -urNp linux-2.6.32.42/drivers/scsi/sg.c linux-2.6.32.42/drivers/scsi/sg.c
34888--- linux-2.6.32.42/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34889+++ linux-2.6.32.42/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34890@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34891 const struct file_operations * fops;
34892 };
34893
34894-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34895+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34896 {"allow_dio", &adio_fops},
34897 {"debug", &debug_fops},
34898 {"def_reserved_size", &dressz_fops},
34899@@ -2307,7 +2307,7 @@ sg_proc_init(void)
34900 {
34901 int k, mask;
34902 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34903- struct sg_proc_leaf * leaf;
34904+ const struct sg_proc_leaf * leaf;
34905
34906 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34907 if (!sg_proc_sgp)
34908diff -urNp linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c
34909--- linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34910+++ linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34911@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34912 int do_iounmap = 0;
34913 int do_disable_device = 1;
34914
34915+ pax_track_stack();
34916+
34917 memset(&sym_dev, 0, sizeof(sym_dev));
34918 memset(&nvram, 0, sizeof(nvram));
34919 sym_dev.pdev = pdev;
34920diff -urNp linux-2.6.32.42/drivers/serial/kgdboc.c linux-2.6.32.42/drivers/serial/kgdboc.c
34921--- linux-2.6.32.42/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34922+++ linux-2.6.32.42/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34923@@ -18,7 +18,7 @@
34924
34925 #define MAX_CONFIG_LEN 40
34926
34927-static struct kgdb_io kgdboc_io_ops;
34928+static const struct kgdb_io kgdboc_io_ops;
34929
34930 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34931 static int configured = -1;
34932@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34933 module_put(THIS_MODULE);
34934 }
34935
34936-static struct kgdb_io kgdboc_io_ops = {
34937+static const struct kgdb_io kgdboc_io_ops = {
34938 .name = "kgdboc",
34939 .read_char = kgdboc_get_char,
34940 .write_char = kgdboc_put_char,
34941diff -urNp linux-2.6.32.42/drivers/spi/spi.c linux-2.6.32.42/drivers/spi/spi.c
34942--- linux-2.6.32.42/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34943+++ linux-2.6.32.42/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34944@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34945 EXPORT_SYMBOL_GPL(spi_sync);
34946
34947 /* portable code must never pass more than 32 bytes */
34948-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34949+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34950
34951 static u8 *buf;
34952
34953diff -urNp linux-2.6.32.42/drivers/staging/android/binder.c linux-2.6.32.42/drivers/staging/android/binder.c
34954--- linux-2.6.32.42/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34955+++ linux-2.6.32.42/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34956@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34957 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34958 }
34959
34960-static struct vm_operations_struct binder_vm_ops = {
34961+static const struct vm_operations_struct binder_vm_ops = {
34962 .open = binder_vma_open,
34963 .close = binder_vma_close,
34964 };
34965diff -urNp linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c
34966--- linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34967+++ linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34968@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34969 return VM_FAULT_NOPAGE;
34970 }
34971
34972-static struct vm_operations_struct b3dfg_vm_ops = {
34973+static const struct vm_operations_struct b3dfg_vm_ops = {
34974 .fault = b3dfg_vma_fault,
34975 };
34976
34977@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34978 return r;
34979 }
34980
34981-static struct file_operations b3dfg_fops = {
34982+static const struct file_operations b3dfg_fops = {
34983 .owner = THIS_MODULE,
34984 .open = b3dfg_open,
34985 .release = b3dfg_release,
34986diff -urNp linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c
34987--- linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34988+++ linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34989@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34990 mutex_unlock(&dev->mutex);
34991 }
34992
34993-static struct vm_operations_struct comedi_vm_ops = {
34994+static const struct vm_operations_struct comedi_vm_ops = {
34995 .close = comedi_unmap,
34996 };
34997
34998diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c
34999--- linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
35000+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
35001@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
35002 static dev_t adsp_devno;
35003 static struct class *adsp_class;
35004
35005-static struct file_operations adsp_fops = {
35006+static const struct file_operations adsp_fops = {
35007 .owner = THIS_MODULE,
35008 .open = adsp_open,
35009 .unlocked_ioctl = adsp_ioctl,
35010diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c
35011--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
35012+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
35013@@ -1022,7 +1022,7 @@ done:
35014 return rc;
35015 }
35016
35017-static struct file_operations audio_aac_fops = {
35018+static const struct file_operations audio_aac_fops = {
35019 .owner = THIS_MODULE,
35020 .open = audio_open,
35021 .release = audio_release,
35022diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c
35023--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
35024+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
35025@@ -833,7 +833,7 @@ done:
35026 return rc;
35027 }
35028
35029-static struct file_operations audio_amrnb_fops = {
35030+static const struct file_operations audio_amrnb_fops = {
35031 .owner = THIS_MODULE,
35032 .open = audamrnb_open,
35033 .release = audamrnb_release,
35034diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c
35035--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
35036+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
35037@@ -805,7 +805,7 @@ dma_fail:
35038 return rc;
35039 }
35040
35041-static struct file_operations audio_evrc_fops = {
35042+static const struct file_operations audio_evrc_fops = {
35043 .owner = THIS_MODULE,
35044 .open = audevrc_open,
35045 .release = audevrc_release,
35046diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c
35047--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
35048+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
35049@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
35050 return 0;
35051 }
35052
35053-static struct file_operations audio_fops = {
35054+static const struct file_operations audio_fops = {
35055 .owner = THIS_MODULE,
35056 .open = audio_in_open,
35057 .release = audio_in_release,
35058@@ -922,7 +922,7 @@ static struct file_operations audio_fops
35059 .unlocked_ioctl = audio_in_ioctl,
35060 };
35061
35062-static struct file_operations audpre_fops = {
35063+static const struct file_operations audpre_fops = {
35064 .owner = THIS_MODULE,
35065 .open = audpre_open,
35066 .unlocked_ioctl = audpre_ioctl,
35067diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c
35068--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
35069+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
35070@@ -941,7 +941,7 @@ done:
35071 return rc;
35072 }
35073
35074-static struct file_operations audio_mp3_fops = {
35075+static const struct file_operations audio_mp3_fops = {
35076 .owner = THIS_MODULE,
35077 .open = audio_open,
35078 .release = audio_release,
35079diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c
35080--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
35081+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
35082@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
35083 return 0;
35084 }
35085
35086-static struct file_operations audio_fops = {
35087+static const struct file_operations audio_fops = {
35088 .owner = THIS_MODULE,
35089 .open = audio_open,
35090 .release = audio_release,
35091@@ -819,7 +819,7 @@ static struct file_operations audio_fops
35092 .unlocked_ioctl = audio_ioctl,
35093 };
35094
35095-static struct file_operations audpp_fops = {
35096+static const struct file_operations audpp_fops = {
35097 .owner = THIS_MODULE,
35098 .open = audpp_open,
35099 .unlocked_ioctl = audpp_ioctl,
35100diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c
35101--- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
35102+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
35103@@ -816,7 +816,7 @@ err:
35104 return rc;
35105 }
35106
35107-static struct file_operations audio_qcelp_fops = {
35108+static const struct file_operations audio_qcelp_fops = {
35109 .owner = THIS_MODULE,
35110 .open = audqcelp_open,
35111 .release = audqcelp_release,
35112diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c
35113--- linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
35114+++ linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
35115@@ -242,7 +242,7 @@ err:
35116 return rc;
35117 }
35118
35119-static struct file_operations snd_fops = {
35120+static const struct file_operations snd_fops = {
35121 .owner = THIS_MODULE,
35122 .open = snd_open,
35123 .release = snd_release,
35124diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c
35125--- linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
35126+++ linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
35127@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
35128 return 0;
35129 }
35130
35131-static struct file_operations qmi_fops = {
35132+static const struct file_operations qmi_fops = {
35133 .owner = THIS_MODULE,
35134 .read = qmi_read,
35135 .write = qmi_write,
35136diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c
35137--- linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
35138+++ linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
35139@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
35140 return rc;
35141 }
35142
35143-static struct file_operations rpcrouter_server_fops = {
35144+static const struct file_operations rpcrouter_server_fops = {
35145 .owner = THIS_MODULE,
35146 .open = rpcrouter_open,
35147 .release = rpcrouter_release,
35148@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
35149 .unlocked_ioctl = rpcrouter_ioctl,
35150 };
35151
35152-static struct file_operations rpcrouter_router_fops = {
35153+static const struct file_operations rpcrouter_router_fops = {
35154 .owner = THIS_MODULE,
35155 .open = rpcrouter_open,
35156 .release = rpcrouter_release,
35157diff -urNp linux-2.6.32.42/drivers/staging/dst/dcore.c linux-2.6.32.42/drivers/staging/dst/dcore.c
35158--- linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
35159+++ linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
35160@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
35161 return 0;
35162 }
35163
35164-static struct block_device_operations dst_blk_ops = {
35165+static const struct block_device_operations dst_blk_ops = {
35166 .open = dst_bdev_open,
35167 .release = dst_bdev_release,
35168 .owner = THIS_MODULE,
35169@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
35170 n->size = ctl->size;
35171
35172 atomic_set(&n->refcnt, 1);
35173- atomic_long_set(&n->gen, 0);
35174+ atomic_long_set_unchecked(&n->gen, 0);
35175 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
35176
35177 err = dst_node_sysfs_init(n);
35178diff -urNp linux-2.6.32.42/drivers/staging/dst/trans.c linux-2.6.32.42/drivers/staging/dst/trans.c
35179--- linux-2.6.32.42/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
35180+++ linux-2.6.32.42/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
35181@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
35182 t->error = 0;
35183 t->retries = 0;
35184 atomic_set(&t->refcnt, 1);
35185- t->gen = atomic_long_inc_return(&n->gen);
35186+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
35187
35188 t->enc = bio_data_dir(bio);
35189 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
35190diff -urNp linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c
35191--- linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
35192+++ linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
35193@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
35194 struct net_device_stats *stats = &etdev->net_stats;
35195
35196 if (pMpTcb->Flags & fMP_DEST_BROAD)
35197- atomic_inc(&etdev->Stats.brdcstxmt);
35198+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
35199 else if (pMpTcb->Flags & fMP_DEST_MULTI)
35200- atomic_inc(&etdev->Stats.multixmt);
35201+ atomic_inc_unchecked(&etdev->Stats.multixmt);
35202 else
35203- atomic_inc(&etdev->Stats.unixmt);
35204+ atomic_inc_unchecked(&etdev->Stats.unixmt);
35205
35206 if (pMpTcb->Packet) {
35207 stats->tx_bytes += pMpTcb->Packet->len;
35208diff -urNp linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h
35209--- linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
35210+++ linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
35211@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
35212 * operations
35213 */
35214 u32 unircv; /* # multicast packets received */
35215- atomic_t unixmt; /* # multicast packets for Tx */
35216+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
35217 u32 multircv; /* # multicast packets received */
35218- atomic_t multixmt; /* # multicast packets for Tx */
35219+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
35220 u32 brdcstrcv; /* # broadcast packets received */
35221- atomic_t brdcstxmt; /* # broadcast packets for Tx */
35222+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
35223 u32 norcvbuf; /* # Rx packets discarded */
35224 u32 noxmtbuf; /* # Tx packets discarded */
35225
35226diff -urNp linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c
35227--- linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
35228+++ linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
35229@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
35230 return 0;
35231 }
35232
35233-static struct vm_operations_struct go7007_vm_ops = {
35234+static const struct vm_operations_struct go7007_vm_ops = {
35235 .open = go7007_vm_open,
35236 .close = go7007_vm_close,
35237 .fault = go7007_vm_fault,
35238diff -urNp linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c
35239--- linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
35240+++ linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
35241@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
35242 /* The one and only one */
35243 static struct blkvsc_driver_context g_blkvsc_drv;
35244
35245-static struct block_device_operations block_ops = {
35246+static const struct block_device_operations block_ops = {
35247 .owner = THIS_MODULE,
35248 .open = blkvsc_open,
35249 .release = blkvsc_release,
35250diff -urNp linux-2.6.32.42/drivers/staging/hv/Channel.c linux-2.6.32.42/drivers/staging/hv/Channel.c
35251--- linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
35252+++ linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
35253@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
35254
35255 DPRINT_ENTER(VMBUS);
35256
35257- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
35258- atomic_inc(&gVmbusConnection.NextGpadlHandle);
35259+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
35260+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
35261
35262 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
35263 ASSERT(msgInfo != NULL);
35264diff -urNp linux-2.6.32.42/drivers/staging/hv/Hv.c linux-2.6.32.42/drivers/staging/hv/Hv.c
35265--- linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
35266+++ linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
35267@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
35268 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
35269 u32 outputAddressHi = outputAddress >> 32;
35270 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
35271- volatile void *hypercallPage = gHvContext.HypercallPage;
35272+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
35273
35274 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
35275 Control, Input, Output);
35276diff -urNp linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c
35277--- linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
35278+++ linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
35279@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
35280 to_device_context(root_device_obj);
35281 struct device_context *child_device_ctx =
35282 to_device_context(child_device_obj);
35283- static atomic_t device_num = ATOMIC_INIT(0);
35284+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35285
35286 DPRINT_ENTER(VMBUS_DRV);
35287
35288@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
35289
35290 /* Set the device name. Otherwise, device_register() will fail. */
35291 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
35292- atomic_inc_return(&device_num));
35293+ atomic_inc_return_unchecked(&device_num));
35294
35295 /* The new device belongs to this bus */
35296 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
35297diff -urNp linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h
35298--- linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
35299+++ linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
35300@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
35301 struct VMBUS_CONNECTION {
35302 enum VMBUS_CONNECT_STATE ConnectState;
35303
35304- atomic_t NextGpadlHandle;
35305+ atomic_unchecked_t NextGpadlHandle;
35306
35307 /*
35308 * Represents channel interrupts. Each bit position represents a
35309diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet.c linux-2.6.32.42/drivers/staging/octeon/ethernet.c
35310--- linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
35311+++ linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
35312@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
35313 * since the RX tasklet also increments it.
35314 */
35315 #ifdef CONFIG_64BIT
35316- atomic64_add(rx_status.dropped_packets,
35317- (atomic64_t *)&priv->stats.rx_dropped);
35318+ atomic64_add_unchecked(rx_status.dropped_packets,
35319+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35320 #else
35321- atomic_add(rx_status.dropped_packets,
35322- (atomic_t *)&priv->stats.rx_dropped);
35323+ atomic_add_unchecked(rx_status.dropped_packets,
35324+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35325 #endif
35326 }
35327
35328diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c
35329--- linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
35330+++ linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
35331@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
35332 /* Increment RX stats for virtual ports */
35333 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35334 #ifdef CONFIG_64BIT
35335- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35336- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35337+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35338+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35339 #else
35340- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35341- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35342+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35343+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35344 #endif
35345 }
35346 netif_receive_skb(skb);
35347@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
35348 dev->name);
35349 */
35350 #ifdef CONFIG_64BIT
35351- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35352+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35353 #else
35354- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35355+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35356 #endif
35357 dev_kfree_skb_irq(skb);
35358 }
35359diff -urNp linux-2.6.32.42/drivers/staging/panel/panel.c linux-2.6.32.42/drivers/staging/panel/panel.c
35360--- linux-2.6.32.42/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
35361+++ linux-2.6.32.42/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
35362@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
35363 return 0;
35364 }
35365
35366-static struct file_operations lcd_fops = {
35367+static const struct file_operations lcd_fops = {
35368 .write = lcd_write,
35369 .open = lcd_open,
35370 .release = lcd_release,
35371@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
35372 return 0;
35373 }
35374
35375-static struct file_operations keypad_fops = {
35376+static const struct file_operations keypad_fops = {
35377 .read = keypad_read, /* read */
35378 .open = keypad_open, /* open */
35379 .release = keypad_release, /* close */
35380diff -urNp linux-2.6.32.42/drivers/staging/phison/phison.c linux-2.6.32.42/drivers/staging/phison/phison.c
35381--- linux-2.6.32.42/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
35382+++ linux-2.6.32.42/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
35383@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
35384 ATA_BMDMA_SHT(DRV_NAME),
35385 };
35386
35387-static struct ata_port_operations phison_ops = {
35388+static const struct ata_port_operations phison_ops = {
35389 .inherits = &ata_bmdma_port_ops,
35390 .prereset = phison_pre_reset,
35391 };
35392diff -urNp linux-2.6.32.42/drivers/staging/poch/poch.c linux-2.6.32.42/drivers/staging/poch/poch.c
35393--- linux-2.6.32.42/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
35394+++ linux-2.6.32.42/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
35395@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
35396 return 0;
35397 }
35398
35399-static struct file_operations poch_fops = {
35400+static const struct file_operations poch_fops = {
35401 .owner = THIS_MODULE,
35402 .open = poch_open,
35403 .release = poch_release,
35404diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/inode.c linux-2.6.32.42/drivers/staging/pohmelfs/inode.c
35405--- linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
35406+++ linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
35407@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
35408 mutex_init(&psb->mcache_lock);
35409 psb->mcache_root = RB_ROOT;
35410 psb->mcache_timeout = msecs_to_jiffies(5000);
35411- atomic_long_set(&psb->mcache_gen, 0);
35412+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35413
35414 psb->trans_max_pages = 100;
35415
35416@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
35417 INIT_LIST_HEAD(&psb->crypto_ready_list);
35418 INIT_LIST_HEAD(&psb->crypto_active_list);
35419
35420- atomic_set(&psb->trans_gen, 1);
35421+ atomic_set_unchecked(&psb->trans_gen, 1);
35422 atomic_long_set(&psb->total_inodes, 0);
35423
35424 mutex_init(&psb->state_lock);
35425diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c
35426--- linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
35427+++ linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
35428@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35429 m->data = data;
35430 m->start = start;
35431 m->size = size;
35432- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35433+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35434
35435 mutex_lock(&psb->mcache_lock);
35436 err = pohmelfs_mcache_insert(psb, m);
35437diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h
35438--- linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
35439+++ linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
35440@@ -570,14 +570,14 @@ struct pohmelfs_config;
35441 struct pohmelfs_sb {
35442 struct rb_root mcache_root;
35443 struct mutex mcache_lock;
35444- atomic_long_t mcache_gen;
35445+ atomic_long_unchecked_t mcache_gen;
35446 unsigned long mcache_timeout;
35447
35448 unsigned int idx;
35449
35450 unsigned int trans_retries;
35451
35452- atomic_t trans_gen;
35453+ atomic_unchecked_t trans_gen;
35454
35455 unsigned int crypto_attached_size;
35456 unsigned int crypto_align_size;
35457diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/trans.c linux-2.6.32.42/drivers/staging/pohmelfs/trans.c
35458--- linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
35459+++ linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
35460@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35461 int err;
35462 struct netfs_cmd *cmd = t->iovec.iov_base;
35463
35464- t->gen = atomic_inc_return(&psb->trans_gen);
35465+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35466
35467 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35468 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35469diff -urNp linux-2.6.32.42/drivers/staging/sep/sep_driver.c linux-2.6.32.42/drivers/staging/sep/sep_driver.c
35470--- linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
35471+++ linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
35472@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
35473 static dev_t sep_devno;
35474
35475 /* the files operations structure of the driver */
35476-static struct file_operations sep_file_operations = {
35477+static const struct file_operations sep_file_operations = {
35478 .owner = THIS_MODULE,
35479 .ioctl = sep_ioctl,
35480 .poll = sep_poll,
35481diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci.h linux-2.6.32.42/drivers/staging/usbip/vhci.h
35482--- linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
35483+++ linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
35484@@ -92,7 +92,7 @@ struct vhci_hcd {
35485 unsigned resuming:1;
35486 unsigned long re_timeout;
35487
35488- atomic_t seqnum;
35489+ atomic_unchecked_t seqnum;
35490
35491 /*
35492 * NOTE:
35493diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c
35494--- linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
35495+++ linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
35496@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
35497 return;
35498 }
35499
35500- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35501+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35502 if (priv->seqnum == 0xffff)
35503 usbip_uinfo("seqnum max\n");
35504
35505@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
35506 return -ENOMEM;
35507 }
35508
35509- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35510+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35511 if (unlink->seqnum == 0xffff)
35512 usbip_uinfo("seqnum max\n");
35513
35514@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
35515 vdev->rhport = rhport;
35516 }
35517
35518- atomic_set(&vhci->seqnum, 0);
35519+ atomic_set_unchecked(&vhci->seqnum, 0);
35520 spin_lock_init(&vhci->lock);
35521
35522
35523diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c
35524--- linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
35525+++ linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
35526@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
35527 usbip_uerr("cannot find a urb of seqnum %u\n",
35528 pdu->base.seqnum);
35529 usbip_uinfo("max seqnum %d\n",
35530- atomic_read(&the_controller->seqnum));
35531+ atomic_read_unchecked(&the_controller->seqnum));
35532 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35533 return;
35534 }
35535diff -urNp linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c
35536--- linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
35537+++ linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
35538@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
35539 static int __init vme_user_probe(struct device *, int, int);
35540 static int __exit vme_user_remove(struct device *, int, int);
35541
35542-static struct file_operations vme_user_fops = {
35543+static const struct file_operations vme_user_fops = {
35544 .open = vme_user_open,
35545 .release = vme_user_release,
35546 .read = vme_user_read,
35547diff -urNp linux-2.6.32.42/drivers/telephony/ixj.c linux-2.6.32.42/drivers/telephony/ixj.c
35548--- linux-2.6.32.42/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
35549+++ linux-2.6.32.42/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
35550@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35551 bool mContinue;
35552 char *pIn, *pOut;
35553
35554+ pax_track_stack();
35555+
35556 if (!SCI_Prepare(j))
35557 return 0;
35558
35559diff -urNp linux-2.6.32.42/drivers/uio/uio.c linux-2.6.32.42/drivers/uio/uio.c
35560--- linux-2.6.32.42/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
35561+++ linux-2.6.32.42/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
35562@@ -23,6 +23,7 @@
35563 #include <linux/string.h>
35564 #include <linux/kobject.h>
35565 #include <linux/uio_driver.h>
35566+#include <asm/local.h>
35567
35568 #define UIO_MAX_DEVICES 255
35569
35570@@ -30,10 +31,10 @@ struct uio_device {
35571 struct module *owner;
35572 struct device *dev;
35573 int minor;
35574- atomic_t event;
35575+ atomic_unchecked_t event;
35576 struct fasync_struct *async_queue;
35577 wait_queue_head_t wait;
35578- int vma_count;
35579+ local_t vma_count;
35580 struct uio_info *info;
35581 struct kobject *map_dir;
35582 struct kobject *portio_dir;
35583@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
35584 return entry->show(mem, buf);
35585 }
35586
35587-static struct sysfs_ops map_sysfs_ops = {
35588+static const struct sysfs_ops map_sysfs_ops = {
35589 .show = map_type_show,
35590 };
35591
35592@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
35593 return entry->show(port, buf);
35594 }
35595
35596-static struct sysfs_ops portio_sysfs_ops = {
35597+static const struct sysfs_ops portio_sysfs_ops = {
35598 .show = portio_type_show,
35599 };
35600
35601@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
35602 struct uio_device *idev = dev_get_drvdata(dev);
35603 if (idev)
35604 return sprintf(buf, "%u\n",
35605- (unsigned int)atomic_read(&idev->event));
35606+ (unsigned int)atomic_read_unchecked(&idev->event));
35607 else
35608 return -ENODEV;
35609 }
35610@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
35611 {
35612 struct uio_device *idev = info->uio_dev;
35613
35614- atomic_inc(&idev->event);
35615+ atomic_inc_unchecked(&idev->event);
35616 wake_up_interruptible(&idev->wait);
35617 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35618 }
35619@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35620 }
35621
35622 listener->dev = idev;
35623- listener->event_count = atomic_read(&idev->event);
35624+ listener->event_count = atomic_read_unchecked(&idev->event);
35625 filep->private_data = listener;
35626
35627 if (idev->info->open) {
35628@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35629 return -EIO;
35630
35631 poll_wait(filep, &idev->wait, wait);
35632- if (listener->event_count != atomic_read(&idev->event))
35633+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35634 return POLLIN | POLLRDNORM;
35635 return 0;
35636 }
35637@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35638 do {
35639 set_current_state(TASK_INTERRUPTIBLE);
35640
35641- event_count = atomic_read(&idev->event);
35642+ event_count = atomic_read_unchecked(&idev->event);
35643 if (event_count != listener->event_count) {
35644 if (copy_to_user(buf, &event_count, count))
35645 retval = -EFAULT;
35646@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35647 static void uio_vma_open(struct vm_area_struct *vma)
35648 {
35649 struct uio_device *idev = vma->vm_private_data;
35650- idev->vma_count++;
35651+ local_inc(&idev->vma_count);
35652 }
35653
35654 static void uio_vma_close(struct vm_area_struct *vma)
35655 {
35656 struct uio_device *idev = vma->vm_private_data;
35657- idev->vma_count--;
35658+ local_dec(&idev->vma_count);
35659 }
35660
35661 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35662@@ -840,7 +841,7 @@ int __uio_register_device(struct module
35663 idev->owner = owner;
35664 idev->info = info;
35665 init_waitqueue_head(&idev->wait);
35666- atomic_set(&idev->event, 0);
35667+ atomic_set_unchecked(&idev->event, 0);
35668
35669 ret = uio_get_minor(idev);
35670 if (ret)
35671diff -urNp linux-2.6.32.42/drivers/usb/atm/usbatm.c linux-2.6.32.42/drivers/usb/atm/usbatm.c
35672--- linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35673+++ linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35674@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35675 if (printk_ratelimit())
35676 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35677 __func__, vpi, vci);
35678- atomic_inc(&vcc->stats->rx_err);
35679+ atomic_inc_unchecked(&vcc->stats->rx_err);
35680 return;
35681 }
35682
35683@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35684 if (length > ATM_MAX_AAL5_PDU) {
35685 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35686 __func__, length, vcc);
35687- atomic_inc(&vcc->stats->rx_err);
35688+ atomic_inc_unchecked(&vcc->stats->rx_err);
35689 goto out;
35690 }
35691
35692@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35693 if (sarb->len < pdu_length) {
35694 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35695 __func__, pdu_length, sarb->len, vcc);
35696- atomic_inc(&vcc->stats->rx_err);
35697+ atomic_inc_unchecked(&vcc->stats->rx_err);
35698 goto out;
35699 }
35700
35701 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35702 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35703 __func__, vcc);
35704- atomic_inc(&vcc->stats->rx_err);
35705+ atomic_inc_unchecked(&vcc->stats->rx_err);
35706 goto out;
35707 }
35708
35709@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35710 if (printk_ratelimit())
35711 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35712 __func__, length);
35713- atomic_inc(&vcc->stats->rx_drop);
35714+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35715 goto out;
35716 }
35717
35718@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35719
35720 vcc->push(vcc, skb);
35721
35722- atomic_inc(&vcc->stats->rx);
35723+ atomic_inc_unchecked(&vcc->stats->rx);
35724 out:
35725 skb_trim(sarb, 0);
35726 }
35727@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35728 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35729
35730 usbatm_pop(vcc, skb);
35731- atomic_inc(&vcc->stats->tx);
35732+ atomic_inc_unchecked(&vcc->stats->tx);
35733
35734 skb = skb_dequeue(&instance->sndqueue);
35735 }
35736@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35737 if (!left--)
35738 return sprintf(page,
35739 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35740- atomic_read(&atm_dev->stats.aal5.tx),
35741- atomic_read(&atm_dev->stats.aal5.tx_err),
35742- atomic_read(&atm_dev->stats.aal5.rx),
35743- atomic_read(&atm_dev->stats.aal5.rx_err),
35744- atomic_read(&atm_dev->stats.aal5.rx_drop));
35745+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35746+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35747+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35748+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35749+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35750
35751 if (!left--) {
35752 if (instance->disconnected)
35753diff -urNp linux-2.6.32.42/drivers/usb/class/cdc-wdm.c linux-2.6.32.42/drivers/usb/class/cdc-wdm.c
35754--- linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35755+++ linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35756@@ -314,7 +314,7 @@ static ssize_t wdm_write
35757 if (r < 0)
35758 goto outnp;
35759
35760- if (!file->f_flags && O_NONBLOCK)
35761+ if (!(file->f_flags & O_NONBLOCK))
35762 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35763 &desc->flags));
35764 else
35765diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.c linux-2.6.32.42/drivers/usb/core/hcd.c
35766--- linux-2.6.32.42/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35767+++ linux-2.6.32.42/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35768@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35769
35770 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35771
35772-struct usb_mon_operations *mon_ops;
35773+const struct usb_mon_operations *mon_ops;
35774
35775 /*
35776 * The registration is unlocked.
35777@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35778 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35779 */
35780
35781-int usb_mon_register (struct usb_mon_operations *ops)
35782+int usb_mon_register (const struct usb_mon_operations *ops)
35783 {
35784
35785 if (mon_ops)
35786diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.h linux-2.6.32.42/drivers/usb/core/hcd.h
35787--- linux-2.6.32.42/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35788+++ linux-2.6.32.42/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35789@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35790 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35791
35792 struct usb_mon_operations {
35793- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35794- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35795- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35796+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35797+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35798+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35799 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35800 };
35801
35802-extern struct usb_mon_operations *mon_ops;
35803+extern const struct usb_mon_operations *mon_ops;
35804
35805 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35806 {
35807@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35808 (*mon_ops->urb_complete)(bus, urb, status);
35809 }
35810
35811-int usb_mon_register(struct usb_mon_operations *ops);
35812+int usb_mon_register(const struct usb_mon_operations *ops);
35813 void usb_mon_deregister(void);
35814
35815 #else
35816diff -urNp linux-2.6.32.42/drivers/usb/core/message.c linux-2.6.32.42/drivers/usb/core/message.c
35817--- linux-2.6.32.42/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35818+++ linux-2.6.32.42/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35819@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35820 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35821 if (buf) {
35822 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35823- if (len > 0) {
35824- smallbuf = kmalloc(++len, GFP_NOIO);
35825+ if (len++ > 0) {
35826+ smallbuf = kmalloc(len, GFP_NOIO);
35827 if (!smallbuf)
35828 return buf;
35829 memcpy(smallbuf, buf, len);
35830diff -urNp linux-2.6.32.42/drivers/usb/misc/appledisplay.c linux-2.6.32.42/drivers/usb/misc/appledisplay.c
35831--- linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35832+++ linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35833@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35834 return pdata->msgdata[1];
35835 }
35836
35837-static struct backlight_ops appledisplay_bl_data = {
35838+static const struct backlight_ops appledisplay_bl_data = {
35839 .get_brightness = appledisplay_bl_get_brightness,
35840 .update_status = appledisplay_bl_update_status,
35841 };
35842diff -urNp linux-2.6.32.42/drivers/usb/mon/mon_main.c linux-2.6.32.42/drivers/usb/mon/mon_main.c
35843--- linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35844+++ linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35845@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35846 /*
35847 * Ops
35848 */
35849-static struct usb_mon_operations mon_ops_0 = {
35850+static const struct usb_mon_operations mon_ops_0 = {
35851 .urb_submit = mon_submit,
35852 .urb_submit_error = mon_submit_error,
35853 .urb_complete = mon_complete,
35854diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h
35855--- linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35856+++ linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35857@@ -192,7 +192,7 @@ struct wahc {
35858 struct list_head xfer_delayed_list;
35859 spinlock_t xfer_list_lock;
35860 struct work_struct xfer_work;
35861- atomic_t xfer_id_count;
35862+ atomic_unchecked_t xfer_id_count;
35863 };
35864
35865
35866@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35867 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35868 spin_lock_init(&wa->xfer_list_lock);
35869 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35870- atomic_set(&wa->xfer_id_count, 1);
35871+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35872 }
35873
35874 /**
35875diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c
35876--- linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35877+++ linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35878@@ -293,7 +293,7 @@ out:
35879 */
35880 static void wa_xfer_id_init(struct wa_xfer *xfer)
35881 {
35882- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35883+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35884 }
35885
35886 /*
35887diff -urNp linux-2.6.32.42/drivers/uwb/wlp/messages.c linux-2.6.32.42/drivers/uwb/wlp/messages.c
35888--- linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35889+++ linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35890@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35891 size_t len = skb->len;
35892 size_t used;
35893 ssize_t result;
35894- struct wlp_nonce enonce, rnonce;
35895+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35896 enum wlp_assc_error assc_err;
35897 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35898 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35899diff -urNp linux-2.6.32.42/drivers/uwb/wlp/sysfs.c linux-2.6.32.42/drivers/uwb/wlp/sysfs.c
35900--- linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35901+++ linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35902@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35903 return ret;
35904 }
35905
35906-static
35907-struct sysfs_ops wss_sysfs_ops = {
35908+static const struct sysfs_ops wss_sysfs_ops = {
35909 .show = wlp_wss_attr_show,
35910 .store = wlp_wss_attr_store,
35911 };
35912diff -urNp linux-2.6.32.42/drivers/video/atmel_lcdfb.c linux-2.6.32.42/drivers/video/atmel_lcdfb.c
35913--- linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35914+++ linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35915@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35916 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35917 }
35918
35919-static struct backlight_ops atmel_lcdc_bl_ops = {
35920+static const struct backlight_ops atmel_lcdc_bl_ops = {
35921 .update_status = atmel_bl_update_status,
35922 .get_brightness = atmel_bl_get_brightness,
35923 };
35924diff -urNp linux-2.6.32.42/drivers/video/aty/aty128fb.c linux-2.6.32.42/drivers/video/aty/aty128fb.c
35925--- linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35926+++ linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35927@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35928 return bd->props.brightness;
35929 }
35930
35931-static struct backlight_ops aty128_bl_data = {
35932+static const struct backlight_ops aty128_bl_data = {
35933 .get_brightness = aty128_bl_get_brightness,
35934 .update_status = aty128_bl_update_status,
35935 };
35936diff -urNp linux-2.6.32.42/drivers/video/aty/atyfb_base.c linux-2.6.32.42/drivers/video/aty/atyfb_base.c
35937--- linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35938+++ linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35939@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35940 return bd->props.brightness;
35941 }
35942
35943-static struct backlight_ops aty_bl_data = {
35944+static const struct backlight_ops aty_bl_data = {
35945 .get_brightness = aty_bl_get_brightness,
35946 .update_status = aty_bl_update_status,
35947 };
35948diff -urNp linux-2.6.32.42/drivers/video/aty/radeon_backlight.c linux-2.6.32.42/drivers/video/aty/radeon_backlight.c
35949--- linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35950+++ linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35951@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35952 return bd->props.brightness;
35953 }
35954
35955-static struct backlight_ops radeon_bl_data = {
35956+static const struct backlight_ops radeon_bl_data = {
35957 .get_brightness = radeon_bl_get_brightness,
35958 .update_status = radeon_bl_update_status,
35959 };
35960diff -urNp linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c
35961--- linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35962+++ linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35963@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35964 return error ? data->current_brightness : reg_val;
35965 }
35966
35967-static struct backlight_ops adp5520_bl_ops = {
35968+static const struct backlight_ops adp5520_bl_ops = {
35969 .update_status = adp5520_bl_update_status,
35970 .get_brightness = adp5520_bl_get_brightness,
35971 };
35972diff -urNp linux-2.6.32.42/drivers/video/backlight/adx_bl.c linux-2.6.32.42/drivers/video/backlight/adx_bl.c
35973--- linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35974+++ linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35975@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35976 return 1;
35977 }
35978
35979-static struct backlight_ops adx_backlight_ops = {
35980+static const struct backlight_ops adx_backlight_ops = {
35981 .options = 0,
35982 .update_status = adx_backlight_update_status,
35983 .get_brightness = adx_backlight_get_brightness,
35984diff -urNp linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c
35985--- linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35986+++ linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35987@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35988 return pwm_channel_enable(&pwmbl->pwmc);
35989 }
35990
35991-static struct backlight_ops atmel_pwm_bl_ops = {
35992+static const struct backlight_ops atmel_pwm_bl_ops = {
35993 .get_brightness = atmel_pwm_bl_get_intensity,
35994 .update_status = atmel_pwm_bl_set_intensity,
35995 };
35996diff -urNp linux-2.6.32.42/drivers/video/backlight/backlight.c linux-2.6.32.42/drivers/video/backlight/backlight.c
35997--- linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35998+++ linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35999@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
36000 * ERR_PTR() or a pointer to the newly allocated device.
36001 */
36002 struct backlight_device *backlight_device_register(const char *name,
36003- struct device *parent, void *devdata, struct backlight_ops *ops)
36004+ struct device *parent, void *devdata, const struct backlight_ops *ops)
36005 {
36006 struct backlight_device *new_bd;
36007 int rc;
36008diff -urNp linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c
36009--- linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
36010+++ linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
36011@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
36012 }
36013 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
36014
36015-static struct backlight_ops corgi_bl_ops = {
36016+static const struct backlight_ops corgi_bl_ops = {
36017 .get_brightness = corgi_bl_get_intensity,
36018 .update_status = corgi_bl_update_status,
36019 };
36020diff -urNp linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c
36021--- linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
36022+++ linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
36023@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
36024 return intensity;
36025 }
36026
36027-static struct backlight_ops cr_backlight_ops = {
36028+static const struct backlight_ops cr_backlight_ops = {
36029 .get_brightness = cr_backlight_get_intensity,
36030 .update_status = cr_backlight_set_intensity,
36031 };
36032diff -urNp linux-2.6.32.42/drivers/video/backlight/da903x_bl.c linux-2.6.32.42/drivers/video/backlight/da903x_bl.c
36033--- linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
36034+++ linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
36035@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
36036 return data->current_brightness;
36037 }
36038
36039-static struct backlight_ops da903x_backlight_ops = {
36040+static const struct backlight_ops da903x_backlight_ops = {
36041 .update_status = da903x_backlight_update_status,
36042 .get_brightness = da903x_backlight_get_brightness,
36043 };
36044diff -urNp linux-2.6.32.42/drivers/video/backlight/generic_bl.c linux-2.6.32.42/drivers/video/backlight/generic_bl.c
36045--- linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
36046+++ linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
36047@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
36048 }
36049 EXPORT_SYMBOL(corgibl_limit_intensity);
36050
36051-static struct backlight_ops genericbl_ops = {
36052+static const struct backlight_ops genericbl_ops = {
36053 .options = BL_CORE_SUSPENDRESUME,
36054 .get_brightness = genericbl_get_intensity,
36055 .update_status = genericbl_send_intensity,
36056diff -urNp linux-2.6.32.42/drivers/video/backlight/hp680_bl.c linux-2.6.32.42/drivers/video/backlight/hp680_bl.c
36057--- linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
36058+++ linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
36059@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
36060 return current_intensity;
36061 }
36062
36063-static struct backlight_ops hp680bl_ops = {
36064+static const struct backlight_ops hp680bl_ops = {
36065 .get_brightness = hp680bl_get_intensity,
36066 .update_status = hp680bl_set_intensity,
36067 };
36068diff -urNp linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c
36069--- linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
36070+++ linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
36071@@ -93,7 +93,7 @@ out:
36072 return ret;
36073 }
36074
36075-static struct backlight_ops jornada_bl_ops = {
36076+static const struct backlight_ops jornada_bl_ops = {
36077 .get_brightness = jornada_bl_get_brightness,
36078 .update_status = jornada_bl_update_status,
36079 .options = BL_CORE_SUSPENDRESUME,
36080diff -urNp linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c
36081--- linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
36082+++ linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
36083@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
36084 return kb3886bl_intensity;
36085 }
36086
36087-static struct backlight_ops kb3886bl_ops = {
36088+static const struct backlight_ops kb3886bl_ops = {
36089 .get_brightness = kb3886bl_get_intensity,
36090 .update_status = kb3886bl_send_intensity,
36091 };
36092diff -urNp linux-2.6.32.42/drivers/video/backlight/locomolcd.c linux-2.6.32.42/drivers/video/backlight/locomolcd.c
36093--- linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
36094+++ linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
36095@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
36096 return current_intensity;
36097 }
36098
36099-static struct backlight_ops locomobl_data = {
36100+static const struct backlight_ops locomobl_data = {
36101 .get_brightness = locomolcd_get_intensity,
36102 .update_status = locomolcd_set_intensity,
36103 };
36104diff -urNp linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c
36105--- linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
36106+++ linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
36107@@ -33,7 +33,7 @@ struct dmi_match_data {
36108 unsigned long iostart;
36109 unsigned long iolen;
36110 /* Backlight operations structure. */
36111- struct backlight_ops backlight_ops;
36112+ const struct backlight_ops backlight_ops;
36113 };
36114
36115 /* Module parameters. */
36116diff -urNp linux-2.6.32.42/drivers/video/backlight/omap1_bl.c linux-2.6.32.42/drivers/video/backlight/omap1_bl.c
36117--- linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
36118+++ linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
36119@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
36120 return bl->current_intensity;
36121 }
36122
36123-static struct backlight_ops omapbl_ops = {
36124+static const struct backlight_ops omapbl_ops = {
36125 .get_brightness = omapbl_get_intensity,
36126 .update_status = omapbl_update_status,
36127 };
36128diff -urNp linux-2.6.32.42/drivers/video/backlight/progear_bl.c linux-2.6.32.42/drivers/video/backlight/progear_bl.c
36129--- linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
36130+++ linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
36131@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
36132 return intensity - HW_LEVEL_MIN;
36133 }
36134
36135-static struct backlight_ops progearbl_ops = {
36136+static const struct backlight_ops progearbl_ops = {
36137 .get_brightness = progearbl_get_intensity,
36138 .update_status = progearbl_set_intensity,
36139 };
36140diff -urNp linux-2.6.32.42/drivers/video/backlight/pwm_bl.c linux-2.6.32.42/drivers/video/backlight/pwm_bl.c
36141--- linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
36142+++ linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
36143@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
36144 return bl->props.brightness;
36145 }
36146
36147-static struct backlight_ops pwm_backlight_ops = {
36148+static const struct backlight_ops pwm_backlight_ops = {
36149 .update_status = pwm_backlight_update_status,
36150 .get_brightness = pwm_backlight_get_brightness,
36151 };
36152diff -urNp linux-2.6.32.42/drivers/video/backlight/tosa_bl.c linux-2.6.32.42/drivers/video/backlight/tosa_bl.c
36153--- linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
36154+++ linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
36155@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
36156 return props->brightness;
36157 }
36158
36159-static struct backlight_ops bl_ops = {
36160+static const struct backlight_ops bl_ops = {
36161 .get_brightness = tosa_bl_get_brightness,
36162 .update_status = tosa_bl_update_status,
36163 };
36164diff -urNp linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c
36165--- linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
36166+++ linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
36167@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
36168 return data->current_brightness;
36169 }
36170
36171-static struct backlight_ops wm831x_backlight_ops = {
36172+static const struct backlight_ops wm831x_backlight_ops = {
36173 .options = BL_CORE_SUSPENDRESUME,
36174 .update_status = wm831x_backlight_update_status,
36175 .get_brightness = wm831x_backlight_get_brightness,
36176diff -urNp linux-2.6.32.42/drivers/video/bf54x-lq043fb.c linux-2.6.32.42/drivers/video/bf54x-lq043fb.c
36177--- linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
36178+++ linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
36179@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
36180 return 0;
36181 }
36182
36183-static struct backlight_ops bfin_lq043fb_bl_ops = {
36184+static const struct backlight_ops bfin_lq043fb_bl_ops = {
36185 .get_brightness = bl_get_brightness,
36186 };
36187
36188diff -urNp linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c
36189--- linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
36190+++ linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
36191@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
36192 return 0;
36193 }
36194
36195-static struct backlight_ops bfin_lq043fb_bl_ops = {
36196+static const struct backlight_ops bfin_lq043fb_bl_ops = {
36197 .get_brightness = bl_get_brightness,
36198 };
36199
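The run of hunks above (atyfb, radeon, the drivers/video/backlight family, the bfin framebuffers, plus backlight_device_register() itself) all make the same change: the driver's struct backlight_ops table, and the registration parameter that receives it, become const, so the function-pointer table can sit in read-only memory and cannot be retargeted at runtime. A minimal user-space sketch of that idiom follows; the names are illustrative only, not the kernel's API.

#include <stdio.h>

/* A table of function pointers, analogous to struct backlight_ops. */
struct ops {
        int (*get_brightness)(void);
        void (*update_status)(int level);
};

static int demo_get_brightness(void) { return 42; }
static void demo_update_status(int level) { printf("brightness -> %d\n", level); }

/* 'const' places the table in .rodata; a stray or malicious write can
 * no longer swap these pointers for attacker-chosen code. */
static const struct ops demo_ops = {
        .get_brightness = demo_get_brightness,
        .update_status  = demo_update_status,
};

/* Consumers take 'const struct ops *', mirroring the changed
 * backlight_device_register() prototype in the hunk above. */
static void register_ops(const struct ops *ops)
{
        ops->update_status(ops->get_brightness());
}

int main(void)
{
        register_ops(&demo_ops);
        return 0;
}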
36200diff -urNp linux-2.6.32.42/drivers/video/fbcmap.c linux-2.6.32.42/drivers/video/fbcmap.c
36201--- linux-2.6.32.42/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
36202+++ linux-2.6.32.42/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
36203@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36204 rc = -ENODEV;
36205 goto out;
36206 }
36207- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36208- !info->fbops->fb_setcmap)) {
36209+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36210 rc = -EINVAL;
36211 goto out1;
36212 }
36213diff -urNp linux-2.6.32.42/drivers/video/fbmem.c linux-2.6.32.42/drivers/video/fbmem.c
36214--- linux-2.6.32.42/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
36215+++ linux-2.6.32.42/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
36216@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
36217 image->dx += image->width + 8;
36218 }
36219 } else if (rotate == FB_ROTATE_UD) {
36220- for (x = 0; x < num && image->dx >= 0; x++) {
36221+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36222 info->fbops->fb_imageblit(info, image);
36223 image->dx -= image->width + 8;
36224 }
36225@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
36226 image->dy += image->height + 8;
36227 }
36228 } else if (rotate == FB_ROTATE_CCW) {
36229- for (x = 0; x < num && image->dy >= 0; x++) {
36230+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36231 info->fbops->fb_imageblit(info, image);
36232 image->dy -= image->height + 8;
36233 }
36234@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
36235 int flags = info->flags;
36236 int ret = 0;
36237
36238+ pax_track_stack();
36239+
36240 if (var->activate & FB_ACTIVATE_INV_MODE) {
36241 struct fb_videomode mode1, mode2;
36242
36243@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
36244 void __user *argp = (void __user *)arg;
36245 long ret = 0;
36246
36247+ pax_track_stack();
36248+
36249 switch (cmd) {
36250 case FBIOGET_VSCREENINFO:
36251 if (!lock_fb_info(info))
36252@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
36253 return -EFAULT;
36254 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36255 return -EINVAL;
36256- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36257+ if (con2fb.framebuffer >= FB_MAX)
36258 return -EINVAL;
36259 if (!registered_fb[con2fb.framebuffer])
36260 request_module("fb%d", con2fb.framebuffer);
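The fbcmap.c and fbmem.c hunks above deal with comparisons on unsigned fields: 'cmap->start < 0' and 'con2fb.framebuffer < 0' can never be true and are dropped, while the logo-rotation loops are guarded with an explicit (__s32) cast so that a wrapped dx/dy actually terminates the loop. A small stand-alone demonstration of why the cast matters (plain user-space C, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int dx = 4;            /* like image->dx */
        unsigned int width = 8;

        /* For an unsigned type, dx >= 0 is always 1, so a loop guarded
         * this way never stops once dx wraps below zero. */
        dx -= width + 8;                /* wraps to a huge positive value */

        printf("dx >= 0       : %d\n", dx >= 0);          /* always 1 */
        printf("(int)dx >= 0  : %d\n", (int)dx >= 0);     /* 0 after the wrap */
        return 0;
}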
36261diff -urNp linux-2.6.32.42/drivers/video/i810/i810_accel.c linux-2.6.32.42/drivers/video/i810/i810_accel.c
36262--- linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
36263+++ linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
36264@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36265 }
36266 }
36267 printk("ringbuffer lockup!!!\n");
36268+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36269 i810_report_error(mmio);
36270 par->dev_flags |= LOCKUP;
36271 info->pixmap.scan_align = 1;
36272diff -urNp linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c
36273--- linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
36274+++ linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
36275@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
36276 return bd->props.brightness;
36277 }
36278
36279-static struct backlight_ops nvidia_bl_ops = {
36280+static const struct backlight_ops nvidia_bl_ops = {
36281 .get_brightness = nvidia_bl_get_brightness,
36282 .update_status = nvidia_bl_update_status,
36283 };
36284diff -urNp linux-2.6.32.42/drivers/video/riva/fbdev.c linux-2.6.32.42/drivers/video/riva/fbdev.c
36285--- linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
36286+++ linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
36287@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
36288 return bd->props.brightness;
36289 }
36290
36291-static struct backlight_ops riva_bl_ops = {
36292+static const struct backlight_ops riva_bl_ops = {
36293 .get_brightness = riva_bl_get_brightness,
36294 .update_status = riva_bl_update_status,
36295 };
36296diff -urNp linux-2.6.32.42/drivers/video/uvesafb.c linux-2.6.32.42/drivers/video/uvesafb.c
36297--- linux-2.6.32.42/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
36298+++ linux-2.6.32.42/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
36299@@ -18,6 +18,7 @@
36300 #include <linux/fb.h>
36301 #include <linux/io.h>
36302 #include <linux/mutex.h>
36303+#include <linux/moduleloader.h>
36304 #include <video/edid.h>
36305 #include <video/uvesafb.h>
36306 #ifdef CONFIG_X86
36307@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
36308 NULL,
36309 };
36310
36311- return call_usermodehelper(v86d_path, argv, envp, 1);
36312+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36313 }
36314
36315 /*
36316@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
36317 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36318 par->pmi_setpal = par->ypan = 0;
36319 } else {
36320+
36321+#ifdef CONFIG_PAX_KERNEXEC
36322+#ifdef CONFIG_MODULES
36323+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36324+#endif
36325+ if (!par->pmi_code) {
36326+ par->pmi_setpal = par->ypan = 0;
36327+ return 0;
36328+ }
36329+#endif
36330+
36331 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36332 + task->t.regs.edi);
36333+
36334+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36335+ pax_open_kernel();
36336+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36337+ pax_close_kernel();
36338+
36339+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36340+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36341+#else
36342 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36343 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36344+#endif
36345+
36346 printk(KERN_INFO "uvesafb: protected mode interface info at "
36347 "%04x:%04x\n",
36348 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36349@@ -1799,6 +1822,11 @@ out:
36350 if (par->vbe_modes)
36351 kfree(par->vbe_modes);
36352
36353+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36354+ if (par->pmi_code)
36355+ module_free_exec(NULL, par->pmi_code);
36356+#endif
36357+
36358 framebuffer_release(info);
36359 return err;
36360 }
36361@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
36362 kfree(par->vbe_state_orig);
36363 if (par->vbe_state_saved)
36364 kfree(par->vbe_state_saved);
36365+
36366+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36367+ if (par->pmi_code)
36368+ module_free_exec(NULL, par->pmi_code);
36369+#endif
36370+
36371 }
36372
36373 framebuffer_release(info);
36374diff -urNp linux-2.6.32.42/drivers/video/vesafb.c linux-2.6.32.42/drivers/video/vesafb.c
36375--- linux-2.6.32.42/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
36376+++ linux-2.6.32.42/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
36377@@ -9,6 +9,7 @@
36378 */
36379
36380 #include <linux/module.h>
36381+#include <linux/moduleloader.h>
36382 #include <linux/kernel.h>
36383 #include <linux/errno.h>
36384 #include <linux/string.h>
36385@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
36386 static int vram_total __initdata; /* Set total amount of memory */
36387 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36388 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36389-static void (*pmi_start)(void) __read_mostly;
36390-static void (*pmi_pal) (void) __read_mostly;
36391+static void (*pmi_start)(void) __read_only;
36392+static void (*pmi_pal) (void) __read_only;
36393 static int depth __read_mostly;
36394 static int vga_compat __read_mostly;
36395 /* --------------------------------------------------------------------- */
36396@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36397 unsigned int size_vmode;
36398 unsigned int size_remap;
36399 unsigned int size_total;
36400+ void *pmi_code = NULL;
36401
36402 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36403 return -ENODEV;
36404@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36405 size_remap = size_total;
36406 vesafb_fix.smem_len = size_remap;
36407
36408-#ifndef __i386__
36409- screen_info.vesapm_seg = 0;
36410-#endif
36411-
36412 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36413 printk(KERN_WARNING
36414 "vesafb: cannot reserve video memory at 0x%lx\n",
36415@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
36416 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36417 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36418
36419+#ifdef __i386__
36420+
36421+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36422+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
36423+ if (!pmi_code)
36424+#elif !defined(CONFIG_PAX_KERNEXEC)
36425+ if (0)
36426+#endif
36427+
36428+#endif
36429+ screen_info.vesapm_seg = 0;
36430+
36431 if (screen_info.vesapm_seg) {
36432- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36433- screen_info.vesapm_seg,screen_info.vesapm_off);
36434+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36435+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36436 }
36437
36438 if (screen_info.vesapm_seg < 0xc000)
36439@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
36440
36441 if (ypan || pmi_setpal) {
36442 unsigned short *pmi_base;
36443- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36444- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36445- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36446+
36447+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36448+
36449+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36450+ pax_open_kernel();
36451+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36452+#else
36453+ pmi_code = pmi_base;
36454+#endif
36455+
36456+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36457+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36458+
36459+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36460+ pmi_start = ktva_ktla(pmi_start);
36461+ pmi_pal = ktva_ktla(pmi_pal);
36462+ pax_close_kernel();
36463+#endif
36464+
36465 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36466 if (pmi_base[3]) {
36467 printk(KERN_INFO "vesafb: pmi: ports = ");
36468@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
36469 info->node, info->fix.id);
36470 return 0;
36471 err:
36472+
36473+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36474+ module_free_exec(NULL, pmi_code);
36475+#endif
36476+
36477 if (info->screen_base)
36478 iounmap(info->screen_base);
36479 framebuffer_release(info);
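In both uvesafb.c and vesafb.c the patch stops running the VESA protected-mode interface in place: under KERNEXEC it allocates a dedicated executable region with module_alloc_exec(), copies the BIOS-provided code into it between pax_open_kernel()/pax_close_kernel(), and rebases the pmi_start/pmi_pal entry points into that copy. A rough user-space analogy of the copy-and-rebase step, assuming nothing beyond POSIX mmap() (the PaX helpers themselves are defined elsewhere in this patch, and the offsets and code bytes below are made up):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        /* Stand-in for the BIOS PMI block: offsets of the two entry
         * points, followed by the code itself. */
        unsigned short pmi_base[3] = { 0, 0x10, 0x20 };
        unsigned char blob[64] = { 0xc3 };      /* not meant to be executed here */

        /* Analogous to module_alloc_exec(): a separate region that is
         * allowed to be executable, instead of executing the BIOS code
         * from wherever it happens to be mapped. */
        unsigned char *pmi_code = mmap(NULL, 4096,
                                       PROT_READ | PROT_WRITE | PROT_EXEC,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pmi_code == MAP_FAILED)
                return 1;

        memcpy(pmi_code, blob, sizeof(blob));   /* the pax_open/close_kernel copy */

        /* Rebase the entry points into the new copy, as the hunks do
         * for pmi_start and pmi_pal. */
        void *pmi_start = pmi_code + pmi_base[1];
        void *pmi_pal   = pmi_code + pmi_base[2];
        printf("pmi_start=%p pmi_pal=%p\n", pmi_start, pmi_pal);
        return 0;
}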
36480diff -urNp linux-2.6.32.42/drivers/xen/sys-hypervisor.c linux-2.6.32.42/drivers/xen/sys-hypervisor.c
36481--- linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
36482+++ linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
36483@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
36484 return 0;
36485 }
36486
36487-static struct sysfs_ops hyp_sysfs_ops = {
36488+static const struct sysfs_ops hyp_sysfs_ops = {
36489 .show = hyp_sysfs_show,
36490 .store = hyp_sysfs_store,
36491 };
36492diff -urNp linux-2.6.32.42/fs/9p/vfs_inode.c linux-2.6.32.42/fs/9p/vfs_inode.c
36493--- linux-2.6.32.42/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
36494+++ linux-2.6.32.42/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
36495@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
36496 static void
36497 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36498 {
36499- char *s = nd_get_link(nd);
36500+ const char *s = nd_get_link(nd);
36501
36502 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36503 IS_ERR(s) ? "<error>" : s);
36504diff -urNp linux-2.6.32.42/fs/aio.c linux-2.6.32.42/fs/aio.c
36505--- linux-2.6.32.42/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
36506+++ linux-2.6.32.42/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
36507@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
36508 size += sizeof(struct io_event) * nr_events;
36509 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36510
36511- if (nr_pages < 0)
36512+ if (nr_pages <= 0)
36513 return -EINVAL;
36514
36515 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36516@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
36517 struct aio_timeout to;
36518 int retry = 0;
36519
36520+ pax_track_stack();
36521+
36522 /* needed to zero any padding within an entry (there shouldn't be
36523 * any, but C is fun!
36524 */
36525@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
36526 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
36527 {
36528 ssize_t ret;
36529+ struct iovec iovstack;
36530
36531 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
36532 kiocb->ki_nbytes, 1,
36533- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
36534+ &iovstack, &kiocb->ki_iovec);
36535 if (ret < 0)
36536 goto out;
36537
36538+ if (kiocb->ki_iovec == &iovstack) {
36539+ kiocb->ki_inline_vec = iovstack;
36540+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
36541+ }
36542 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36543 kiocb->ki_cur_seg = 0;
36544 /* ki_nbytes/left now reflect bytes instead of segs */
36545diff -urNp linux-2.6.32.42/fs/attr.c linux-2.6.32.42/fs/attr.c
36546--- linux-2.6.32.42/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
36547+++ linux-2.6.32.42/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
36548@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
36549 unsigned long limit;
36550
36551 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
36552+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36553 if (limit != RLIM_INFINITY && offset > limit)
36554 goto out_sig;
36555 if (offset > inode->i_sb->s_maxbytes)
36556diff -urNp linux-2.6.32.42/fs/autofs/root.c linux-2.6.32.42/fs/autofs/root.c
36557--- linux-2.6.32.42/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
36558+++ linux-2.6.32.42/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
36559@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
36560 set_bit(n,sbi->symlink_bitmap);
36561 sl = &sbi->symlink[n];
36562 sl->len = strlen(symname);
36563- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
36564+ slsize = sl->len+1;
36565+ sl->data = kmalloc(slsize, GFP_KERNEL);
36566 if (!sl->data) {
36567 clear_bit(n,sbi->symlink_bitmap);
36568 unlock_kernel();
36569diff -urNp linux-2.6.32.42/fs/autofs4/symlink.c linux-2.6.32.42/fs/autofs4/symlink.c
36570--- linux-2.6.32.42/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
36571+++ linux-2.6.32.42/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
36572@@ -15,7 +15,7 @@
36573 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
36574 {
36575 struct autofs_info *ino = autofs4_dentry_ino(dentry);
36576- nd_set_link(nd, (char *)ino->u.symlink);
36577+ nd_set_link(nd, ino->u.symlink);
36578 return NULL;
36579 }
36580
36581diff -urNp linux-2.6.32.42/fs/befs/linuxvfs.c linux-2.6.32.42/fs/befs/linuxvfs.c
36582--- linux-2.6.32.42/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
36583+++ linux-2.6.32.42/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
36584@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
36585 {
36586 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36587 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36588- char *link = nd_get_link(nd);
36589+ const char *link = nd_get_link(nd);
36590 if (!IS_ERR(link))
36591 kfree(link);
36592 }
36593diff -urNp linux-2.6.32.42/fs/binfmt_aout.c linux-2.6.32.42/fs/binfmt_aout.c
36594--- linux-2.6.32.42/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
36595+++ linux-2.6.32.42/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
36596@@ -16,6 +16,7 @@
36597 #include <linux/string.h>
36598 #include <linux/fs.h>
36599 #include <linux/file.h>
36600+#include <linux/security.h>
36601 #include <linux/stat.h>
36602 #include <linux/fcntl.h>
36603 #include <linux/ptrace.h>
36604@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
36605 #endif
36606 # define START_STACK(u) (u.start_stack)
36607
36608+ memset(&dump, 0, sizeof(dump));
36609+
36610 fs = get_fs();
36611 set_fs(KERNEL_DS);
36612 has_dumped = 1;
36613@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
36614
36615 /* If the size of the dump file exceeds the rlimit, then see what would happen
36616 if we wrote the stack, but not the data area. */
36617+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36618 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36619 dump.u_dsize = 0;
36620
36621 /* Make sure we have enough room to write the stack and data areas. */
36622+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36623 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36624 dump.u_ssize = 0;
36625
36626@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36627 dump_size = dump.u_ssize << PAGE_SHIFT;
36628 DUMP_WRITE(dump_start,dump_size);
36629 }
36630-/* Finally dump the task struct. Not be used by gdb, but could be useful */
36631- set_fs(KERNEL_DS);
36632- DUMP_WRITE(current,sizeof(*current));
36633+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36634 end_coredump:
36635 set_fs(fs);
36636 return has_dumped;
36637@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36638 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36639 if (rlim >= RLIM_INFINITY)
36640 rlim = ~0;
36641+
36642+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36643 if (ex.a_data + ex.a_bss > rlim)
36644 return -ENOMEM;
36645
36646@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36647 install_exec_creds(bprm);
36648 current->flags &= ~PF_FORKNOEXEC;
36649
36650+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36651+ current->mm->pax_flags = 0UL;
36652+#endif
36653+
36654+#ifdef CONFIG_PAX_PAGEEXEC
36655+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36656+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36657+
36658+#ifdef CONFIG_PAX_EMUTRAMP
36659+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36660+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36661+#endif
36662+
36663+#ifdef CONFIG_PAX_MPROTECT
36664+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36665+ current->mm->pax_flags |= MF_PAX_MPROTECT;
36666+#endif
36667+
36668+ }
36669+#endif
36670+
36671 if (N_MAGIC(ex) == OMAGIC) {
36672 unsigned long text_addr, map_size;
36673 loff_t pos;
36674@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36675
36676 down_write(&current->mm->mmap_sem);
36677 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36678- PROT_READ | PROT_WRITE | PROT_EXEC,
36679+ PROT_READ | PROT_WRITE,
36680 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36681 fd_offset + ex.a_text);
36682 up_write(&current->mm->mmap_sem);
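The load_aout_binary() hunk above derives the per-process PaX flags from the a.out header: PAGEEXEC and MPROTECT default to on unless the binary's N_FLAGS explicitly opts out, and EMUTRAMP must be requested explicitly; the same hunk also drops PROT_EXEC from the data mapping. A compact sketch of that decision ladder as ordinary C, using placeholder bit values rather than the kernel's real F_PAX_*/MF_PAX_* constants:

#include <stdio.h>

/* Placeholder bits - the real values come from the kernel headers. */
#define F_PAX_PAGEEXEC  0x01    /* marking: disable PAGEEXEC */
#define F_PAX_EMUTRAMP  0x02    /* marking: enable EMUTRAMP  */
#define F_PAX_MPROTECT  0x04    /* marking: disable MPROTECT */

#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04

/* Mirrors the structure of the hunk: protections default to on,
 * the header flags can opt out of them, and EMUTRAMP is opt-in. */
static unsigned long derive_pax_flags(unsigned long n_flags)
{
        unsigned long pax_flags = 0;

        if (!(n_flags & F_PAX_PAGEEXEC)) {
                pax_flags |= MF_PAX_PAGEEXEC;

                if (n_flags & F_PAX_EMUTRAMP)
                        pax_flags |= MF_PAX_EMUTRAMP;

                if (!(n_flags & F_PAX_MPROTECT))
                        pax_flags |= MF_PAX_MPROTECT;
        }
        return pax_flags;
}

int main(void)
{
        printf("unmarked binary : %#lx\n", derive_pax_flags(0));
        printf("PAGEEXEC off    : %#lx\n", derive_pax_flags(F_PAX_PAGEEXEC));
        return 0;
}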
36683diff -urNp linux-2.6.32.42/fs/binfmt_elf.c linux-2.6.32.42/fs/binfmt_elf.c
36684--- linux-2.6.32.42/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36685+++ linux-2.6.32.42/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36686@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36687 #define elf_core_dump NULL
36688 #endif
36689
36690+#ifdef CONFIG_PAX_MPROTECT
36691+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36692+#endif
36693+
36694 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36695 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36696 #else
36697@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36698 .load_binary = load_elf_binary,
36699 .load_shlib = load_elf_library,
36700 .core_dump = elf_core_dump,
36701+
36702+#ifdef CONFIG_PAX_MPROTECT
36703+ .handle_mprotect= elf_handle_mprotect,
36704+#endif
36705+
36706 .min_coredump = ELF_EXEC_PAGESIZE,
36707 .hasvdso = 1
36708 };
36709@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36710
36711 static int set_brk(unsigned long start, unsigned long end)
36712 {
36713+ unsigned long e = end;
36714+
36715 start = ELF_PAGEALIGN(start);
36716 end = ELF_PAGEALIGN(end);
36717 if (end > start) {
36718@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36719 if (BAD_ADDR(addr))
36720 return addr;
36721 }
36722- current->mm->start_brk = current->mm->brk = end;
36723+ current->mm->start_brk = current->mm->brk = e;
36724 return 0;
36725 }
36726
36727@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36728 elf_addr_t __user *u_rand_bytes;
36729 const char *k_platform = ELF_PLATFORM;
36730 const char *k_base_platform = ELF_BASE_PLATFORM;
36731- unsigned char k_rand_bytes[16];
36732+ u32 k_rand_bytes[4];
36733 int items;
36734 elf_addr_t *elf_info;
36735 int ei_index = 0;
36736 const struct cred *cred = current_cred();
36737 struct vm_area_struct *vma;
36738+ unsigned long saved_auxv[AT_VECTOR_SIZE];
36739+
36740+ pax_track_stack();
36741
36742 /*
36743 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36744@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36745 * Generate 16 random bytes for userspace PRNG seeding.
36746 */
36747 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36748- u_rand_bytes = (elf_addr_t __user *)
36749- STACK_ALLOC(p, sizeof(k_rand_bytes));
36750+ srandom32(k_rand_bytes[0] ^ random32());
36751+ srandom32(k_rand_bytes[1] ^ random32());
36752+ srandom32(k_rand_bytes[2] ^ random32());
36753+ srandom32(k_rand_bytes[3] ^ random32());
36754+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
36755+ u_rand_bytes = (elf_addr_t __user *) p;
36756 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36757 return -EFAULT;
36758
36759@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36760 return -EFAULT;
36761 current->mm->env_end = p;
36762
36763+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36764+
36765 /* Put the elf_info on the stack in the right place. */
36766 sp = (elf_addr_t __user *)envp + 1;
36767- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36768+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36769 return -EFAULT;
36770 return 0;
36771 }
36772@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36773 {
36774 struct elf_phdr *elf_phdata;
36775 struct elf_phdr *eppnt;
36776- unsigned long load_addr = 0;
36777+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36778 int load_addr_set = 0;
36779 unsigned long last_bss = 0, elf_bss = 0;
36780- unsigned long error = ~0UL;
36781+ unsigned long error = -EINVAL;
36782 unsigned long total_size;
36783 int retval, i, size;
36784
36785@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36786 goto out_close;
36787 }
36788
36789+#ifdef CONFIG_PAX_SEGMEXEC
36790+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36791+ pax_task_size = SEGMEXEC_TASK_SIZE;
36792+#endif
36793+
36794 eppnt = elf_phdata;
36795 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36796 if (eppnt->p_type == PT_LOAD) {
36797@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36798 k = load_addr + eppnt->p_vaddr;
36799 if (BAD_ADDR(k) ||
36800 eppnt->p_filesz > eppnt->p_memsz ||
36801- eppnt->p_memsz > TASK_SIZE ||
36802- TASK_SIZE - eppnt->p_memsz < k) {
36803+ eppnt->p_memsz > pax_task_size ||
36804+ pax_task_size - eppnt->p_memsz < k) {
36805 error = -ENOMEM;
36806 goto out_close;
36807 }
36808@@ -532,6 +557,194 @@ out:
36809 return error;
36810 }
36811
36812+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36813+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36814+{
36815+ unsigned long pax_flags = 0UL;
36816+
36817+#ifdef CONFIG_PAX_PAGEEXEC
36818+ if (elf_phdata->p_flags & PF_PAGEEXEC)
36819+ pax_flags |= MF_PAX_PAGEEXEC;
36820+#endif
36821+
36822+#ifdef CONFIG_PAX_SEGMEXEC
36823+ if (elf_phdata->p_flags & PF_SEGMEXEC)
36824+ pax_flags |= MF_PAX_SEGMEXEC;
36825+#endif
36826+
36827+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36828+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36829+ if (nx_enabled)
36830+ pax_flags &= ~MF_PAX_SEGMEXEC;
36831+ else
36832+ pax_flags &= ~MF_PAX_PAGEEXEC;
36833+ }
36834+#endif
36835+
36836+#ifdef CONFIG_PAX_EMUTRAMP
36837+ if (elf_phdata->p_flags & PF_EMUTRAMP)
36838+ pax_flags |= MF_PAX_EMUTRAMP;
36839+#endif
36840+
36841+#ifdef CONFIG_PAX_MPROTECT
36842+ if (elf_phdata->p_flags & PF_MPROTECT)
36843+ pax_flags |= MF_PAX_MPROTECT;
36844+#endif
36845+
36846+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36847+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36848+ pax_flags |= MF_PAX_RANDMMAP;
36849+#endif
36850+
36851+ return pax_flags;
36852+}
36853+#endif
36854+
36855+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36856+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36857+{
36858+ unsigned long pax_flags = 0UL;
36859+
36860+#ifdef CONFIG_PAX_PAGEEXEC
36861+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36862+ pax_flags |= MF_PAX_PAGEEXEC;
36863+#endif
36864+
36865+#ifdef CONFIG_PAX_SEGMEXEC
36866+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36867+ pax_flags |= MF_PAX_SEGMEXEC;
36868+#endif
36869+
36870+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36871+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36872+ if (nx_enabled)
36873+ pax_flags &= ~MF_PAX_SEGMEXEC;
36874+ else
36875+ pax_flags &= ~MF_PAX_PAGEEXEC;
36876+ }
36877+#endif
36878+
36879+#ifdef CONFIG_PAX_EMUTRAMP
36880+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36881+ pax_flags |= MF_PAX_EMUTRAMP;
36882+#endif
36883+
36884+#ifdef CONFIG_PAX_MPROTECT
36885+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36886+ pax_flags |= MF_PAX_MPROTECT;
36887+#endif
36888+
36889+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36890+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36891+ pax_flags |= MF_PAX_RANDMMAP;
36892+#endif
36893+
36894+ return pax_flags;
36895+}
36896+#endif
36897+
36898+#ifdef CONFIG_PAX_EI_PAX
36899+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36900+{
36901+ unsigned long pax_flags = 0UL;
36902+
36903+#ifdef CONFIG_PAX_PAGEEXEC
36904+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36905+ pax_flags |= MF_PAX_PAGEEXEC;
36906+#endif
36907+
36908+#ifdef CONFIG_PAX_SEGMEXEC
36909+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36910+ pax_flags |= MF_PAX_SEGMEXEC;
36911+#endif
36912+
36913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36914+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36915+ if (nx_enabled)
36916+ pax_flags &= ~MF_PAX_SEGMEXEC;
36917+ else
36918+ pax_flags &= ~MF_PAX_PAGEEXEC;
36919+ }
36920+#endif
36921+
36922+#ifdef CONFIG_PAX_EMUTRAMP
36923+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36924+ pax_flags |= MF_PAX_EMUTRAMP;
36925+#endif
36926+
36927+#ifdef CONFIG_PAX_MPROTECT
36928+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36929+ pax_flags |= MF_PAX_MPROTECT;
36930+#endif
36931+
36932+#ifdef CONFIG_PAX_ASLR
36933+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36934+ pax_flags |= MF_PAX_RANDMMAP;
36935+#endif
36936+
36937+ return pax_flags;
36938+}
36939+#endif
36940+
36941+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36942+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36943+{
36944+ unsigned long pax_flags = 0UL;
36945+
36946+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36947+ unsigned long i;
36948+ int found_flags = 0;
36949+#endif
36950+
36951+#ifdef CONFIG_PAX_EI_PAX
36952+ pax_flags = pax_parse_ei_pax(elf_ex);
36953+#endif
36954+
36955+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36956+ for (i = 0UL; i < elf_ex->e_phnum; i++)
36957+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36958+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36959+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36960+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36961+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36962+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36963+ return -EINVAL;
36964+
36965+#ifdef CONFIG_PAX_SOFTMODE
36966+ if (pax_softmode)
36967+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
36968+ else
36969+#endif
36970+
36971+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36972+ found_flags = 1;
36973+ break;
36974+ }
36975+#endif
36976+
36977+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36978+ if (found_flags == 0) {
36979+ struct elf_phdr phdr;
36980+ memset(&phdr, 0, sizeof(phdr));
36981+ phdr.p_flags = PF_NOEMUTRAMP;
36982+#ifdef CONFIG_PAX_SOFTMODE
36983+ if (pax_softmode)
36984+ pax_flags = pax_parse_softmode(&phdr);
36985+ else
36986+#endif
36987+ pax_flags = pax_parse_hardmode(&phdr);
36988+ }
36989+#endif
36990+
36991+
36992+ if (0 > pax_check_flags(&pax_flags))
36993+ return -EINVAL;
36994+
36995+ current->mm->pax_flags = pax_flags;
36996+ return 0;
36997+}
36998+#endif
36999+
37000 /*
37001 * These are the functions used to load ELF style executables and shared
37002 * libraries. There is no binary dependent code anywhere else.
37003@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
37004 {
37005 unsigned int random_variable = 0;
37006
37007+#ifdef CONFIG_PAX_RANDUSTACK
37008+ if (randomize_va_space)
37009+ return stack_top - current->mm->delta_stack;
37010+#endif
37011+
37012 if ((current->flags & PF_RANDOMIZE) &&
37013 !(current->personality & ADDR_NO_RANDOMIZE)) {
37014 random_variable = get_random_int() & STACK_RND_MASK;
37015@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
37016 unsigned long load_addr = 0, load_bias = 0;
37017 int load_addr_set = 0;
37018 char * elf_interpreter = NULL;
37019- unsigned long error;
37020+ unsigned long error = 0;
37021 struct elf_phdr *elf_ppnt, *elf_phdata;
37022 unsigned long elf_bss, elf_brk;
37023 int retval, i;
37024@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
37025 unsigned long start_code, end_code, start_data, end_data;
37026 unsigned long reloc_func_desc = 0;
37027 int executable_stack = EXSTACK_DEFAULT;
37028- unsigned long def_flags = 0;
37029 struct {
37030 struct elfhdr elf_ex;
37031 struct elfhdr interp_elf_ex;
37032 } *loc;
37033+ unsigned long pax_task_size = TASK_SIZE;
37034
37035 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37036 if (!loc) {
37037@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
37038
37039 /* OK, This is the point of no return */
37040 current->flags &= ~PF_FORKNOEXEC;
37041- current->mm->def_flags = def_flags;
37042+
37043+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37044+ current->mm->pax_flags = 0UL;
37045+#endif
37046+
37047+#ifdef CONFIG_PAX_DLRESOLVE
37048+ current->mm->call_dl_resolve = 0UL;
37049+#endif
37050+
37051+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37052+ current->mm->call_syscall = 0UL;
37053+#endif
37054+
37055+#ifdef CONFIG_PAX_ASLR
37056+ current->mm->delta_mmap = 0UL;
37057+ current->mm->delta_stack = 0UL;
37058+#endif
37059+
37060+ current->mm->def_flags = 0;
37061+
37062+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37063+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37064+ send_sig(SIGKILL, current, 0);
37065+ goto out_free_dentry;
37066+ }
37067+#endif
37068+
37069+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37070+ pax_set_initial_flags(bprm);
37071+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37072+ if (pax_set_initial_flags_func)
37073+ (pax_set_initial_flags_func)(bprm);
37074+#endif
37075+
37076+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37077+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
37078+ current->mm->context.user_cs_limit = PAGE_SIZE;
37079+ current->mm->def_flags |= VM_PAGEEXEC;
37080+ }
37081+#endif
37082+
37083+#ifdef CONFIG_PAX_SEGMEXEC
37084+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37085+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37086+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37087+ pax_task_size = SEGMEXEC_TASK_SIZE;
37088+ }
37089+#endif
37090+
37091+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37092+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37093+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37094+ put_cpu();
37095+ }
37096+#endif
37097
37098 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37099 may depend on the personality. */
37100 SET_PERSONALITY(loc->elf_ex);
37101+
37102+#ifdef CONFIG_PAX_ASLR
37103+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37104+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37105+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37106+ }
37107+#endif
37108+
37109+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37110+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37111+ executable_stack = EXSTACK_DISABLE_X;
37112+ current->personality &= ~READ_IMPLIES_EXEC;
37113+ } else
37114+#endif
37115+
37116 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37117 current->personality |= READ_IMPLIES_EXEC;
37118
37119@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
37120 #else
37121 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37122 #endif
37123+
37124+#ifdef CONFIG_PAX_RANDMMAP
37125+ /* PaX: randomize base address at the default exe base if requested */
37126+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37127+#ifdef CONFIG_SPARC64
37128+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37129+#else
37130+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37131+#endif
37132+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37133+ elf_flags |= MAP_FIXED;
37134+ }
37135+#endif
37136+
37137 }
37138
37139 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37140@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
37141 * allowed task size. Note that p_filesz must always be
37142 * <= p_memsz so it is only necessary to check p_memsz.
37143 */
37144- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37145- elf_ppnt->p_memsz > TASK_SIZE ||
37146- TASK_SIZE - elf_ppnt->p_memsz < k) {
37147+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37148+ elf_ppnt->p_memsz > pax_task_size ||
37149+ pax_task_size - elf_ppnt->p_memsz < k) {
37150 /* set_brk can never work. Avoid overflows. */
37151 send_sig(SIGKILL, current, 0);
37152 retval = -EINVAL;
37153@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
37154 start_data += load_bias;
37155 end_data += load_bias;
37156
37157+#ifdef CONFIG_PAX_RANDMMAP
37158+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37159+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37160+#endif
37161+
37162 /* Calling set_brk effectively mmaps the pages that we need
37163 * for the bss and break sections. We must do this before
37164 * mapping in the interpreter, to make sure it doesn't wind
37165@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
37166 goto out_free_dentry;
37167 }
37168 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37169- send_sig(SIGSEGV, current, 0);
37170- retval = -EFAULT; /* Nobody gets to see this, but.. */
37171- goto out_free_dentry;
37172+ /*
37173+ * This bss-zeroing can fail if the ELF
37174+ * file specifies odd protections. So
37175+ * we don't check the return value
37176+ */
37177 }
37178
37179 if (elf_interpreter) {
37180@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
37181 unsigned long n = off;
37182 if (n > PAGE_SIZE)
37183 n = PAGE_SIZE;
37184- if (!dump_write(file, buf, n))
37185+ if (!dump_write(file, buf, n)) {
37186+ free_page((unsigned long)buf);
37187 return 0;
37188+ }
37189 off -= n;
37190 }
37191 free_page((unsigned long)buf);
37192@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
37193 * Decide what to dump of a segment, part, all or none.
37194 */
37195 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37196- unsigned long mm_flags)
37197+ unsigned long mm_flags, long signr)
37198 {
37199 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37200
37201@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
37202 if (vma->vm_file == NULL)
37203 return 0;
37204
37205- if (FILTER(MAPPED_PRIVATE))
37206+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37207 goto whole;
37208
37209 /*
37210@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
37211 #undef DUMP_WRITE
37212
37213 #define DUMP_WRITE(addr, nr) \
37214+ do { \
37215+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
37216 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
37217- goto end_coredump;
37218+ goto end_coredump; \
37219+ } while (0);
37220
37221 static void fill_elf_header(struct elfhdr *elf, int segs,
37222 u16 machine, u32 flags, u8 osabi)
37223@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
37224 {
37225 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37226 int i = 0;
37227- do
37228+ do {
37229 i += 2;
37230- while (auxv[i - 2] != AT_NULL);
37231+ } while (auxv[i - 2] != AT_NULL);
37232 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37233 }
37234
37235@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
37236 phdr.p_offset = offset;
37237 phdr.p_vaddr = vma->vm_start;
37238 phdr.p_paddr = 0;
37239- phdr.p_filesz = vma_dump_size(vma, mm_flags);
37240+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
37241 phdr.p_memsz = vma->vm_end - vma->vm_start;
37242 offset += phdr.p_filesz;
37243 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37244@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
37245 unsigned long addr;
37246 unsigned long end;
37247
37248- end = vma->vm_start + vma_dump_size(vma, mm_flags);
37249+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
37250
37251 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37252 struct page *page;
37253@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
37254 page = get_dump_page(addr);
37255 if (page) {
37256 void *kaddr = kmap(page);
37257+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37258 stop = ((size += PAGE_SIZE) > limit) ||
37259 !dump_write(file, kaddr, PAGE_SIZE);
37260 kunmap(page);
37261@@ -2042,6 +2356,97 @@ out:
37262
37263 #endif /* USE_ELF_CORE_DUMP */
37264
37265+#ifdef CONFIG_PAX_MPROTECT
37266+/* PaX: non-PIC ELF libraries need relocations on their executable segments
37267+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37268+ * we'll remove VM_MAYWRITE for good on RELRO segments.
37269+ *
37270+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37271+ * basis because we want to allow the common case and not the special ones.
37272+ */
37273+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37274+{
37275+ struct elfhdr elf_h;
37276+ struct elf_phdr elf_p;
37277+ unsigned long i;
37278+ unsigned long oldflags;
37279+ bool is_textrel_rw, is_textrel_rx, is_relro;
37280+
37281+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37282+ return;
37283+
37284+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37285+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37286+
37287+#ifdef CONFIG_PAX_ELFRELOCS
37288+ /* possible TEXTREL */
37289+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37290+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37291+#else
37292+ is_textrel_rw = false;
37293+ is_textrel_rx = false;
37294+#endif
37295+
37296+ /* possible RELRO */
37297+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37298+
37299+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37300+ return;
37301+
37302+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37303+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37304+
37305+#ifdef CONFIG_PAX_ETEXECRELOCS
37306+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37307+#else
37308+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37309+#endif
37310+
37311+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37312+ !elf_check_arch(&elf_h) ||
37313+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37314+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37315+ return;
37316+
37317+ for (i = 0UL; i < elf_h.e_phnum; i++) {
37318+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37319+ return;
37320+ switch (elf_p.p_type) {
37321+ case PT_DYNAMIC:
37322+ if (!is_textrel_rw && !is_textrel_rx)
37323+ continue;
37324+ i = 0UL;
37325+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37326+ elf_dyn dyn;
37327+
37328+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37329+ return;
37330+ if (dyn.d_tag == DT_NULL)
37331+ return;
37332+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37333+ gr_log_textrel(vma);
37334+ if (is_textrel_rw)
37335+ vma->vm_flags |= VM_MAYWRITE;
37336+ else
37337+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37338+ vma->vm_flags &= ~VM_MAYWRITE;
37339+ return;
37340+ }
37341+ i++;
37342+ }
37343+ return;
37344+
37345+ case PT_GNU_RELRO:
37346+ if (!is_relro)
37347+ continue;
37348+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37349+ vma->vm_flags &= ~VM_MAYWRITE;
37350+ return;
37351+ }
37352+ }
37353+}
37354+#endif
37355+
37356 static int __init init_elf_binfmt(void)
37357 {
37358 return register_binfmt(&elf_format);
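Inside load_elf_binary(), when MF_PAX_RANDMMAP is set the patch draws delta_mmap and delta_stack as (pax_get_random_long() & ((1UL << LEN) - 1)) << PAGE_SHIFT, i.e. LEN random page-granular bits, and later folds delta_mmap into the ET_DYN load bias. A small arithmetic check of that expression; the LEN and PAGE_SHIFT values below are examples only, since the real PAX_DELTA_*_LEN macros are per-architecture:

#include <stdio.h>

#define PAGE_SHIFT          12   /* 4 KiB pages, as on x86 */
#define PAX_DELTA_MMAP_LEN  16   /* example value, not any particular arch */

int main(void)
{
        unsigned long rnd = 0x89abcdefUL;   /* stand-in for pax_get_random_long() */
        unsigned long delta = (rnd & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

        /* The mask keeps PAX_DELTA_MMAP_LEN low bits and the shift makes
         * the offset page-aligned, so mmap bases receive LEN bits of
         * entropy and can move by at most ((1 << LEN) - 1) pages. */
        printf("delta_mmap = %#lx (max %#lx)\n",
               delta, ((1UL << PAX_DELTA_MMAP_LEN) - 1UL) << PAGE_SHIFT);
        return 0;
}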
37359diff -urNp linux-2.6.32.42/fs/binfmt_flat.c linux-2.6.32.42/fs/binfmt_flat.c
37360--- linux-2.6.32.42/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
37361+++ linux-2.6.32.42/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
37362@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
37363 realdatastart = (unsigned long) -ENOMEM;
37364 printk("Unable to allocate RAM for process data, errno %d\n",
37365 (int)-realdatastart);
37366+ down_write(&current->mm->mmap_sem);
37367 do_munmap(current->mm, textpos, text_len);
37368+ up_write(&current->mm->mmap_sem);
37369 ret = realdatastart;
37370 goto err;
37371 }
37372@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
37373 }
37374 if (IS_ERR_VALUE(result)) {
37375 printk("Unable to read data+bss, errno %d\n", (int)-result);
37376+ down_write(&current->mm->mmap_sem);
37377 do_munmap(current->mm, textpos, text_len);
37378 do_munmap(current->mm, realdatastart, data_len + extra);
37379+ up_write(&current->mm->mmap_sem);
37380 ret = result;
37381 goto err;
37382 }
37383@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
37384 }
37385 if (IS_ERR_VALUE(result)) {
37386 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37387+ down_write(&current->mm->mmap_sem);
37388 do_munmap(current->mm, textpos, text_len + data_len + extra +
37389 MAX_SHARED_LIBS * sizeof(unsigned long));
37390+ up_write(&current->mm->mmap_sem);
37391 ret = result;
37392 goto err;
37393 }
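The binfmt_flat.c error paths call do_munmap() directly; the hunks above bracket those calls with down_write()/up_write() on mmap_sem, because unmapping rewrites the address-space layout and must hold the mm semaphore for writing. The same writer-lock-around-mutation pattern in portable C, using a pthread rwlock purely as an analogy (the kernel primitive is a sleeping rw-semaphore, not a pthread lock, and the addresses are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for do_munmap(): it modifies the VMA tree, so every caller
 * must already hold the address-space lock for writing. */
static void unmap_region(unsigned long start, unsigned long len)
{
        printf("unmapping %#lx-%#lx\n", start, start + len);
}

int main(void)
{
        pthread_rwlock_wrlock(&mmap_lock);   /* down_write(&current->mm->mmap_sem) */
        unmap_region(0x10000, 0x1000);
        pthread_rwlock_unlock(&mmap_lock);   /* up_write(&current->mm->mmap_sem)   */
        return 0;
}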
37394diff -urNp linux-2.6.32.42/fs/bio.c linux-2.6.32.42/fs/bio.c
37395--- linux-2.6.32.42/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
37396+++ linux-2.6.32.42/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
37397@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
37398
37399 i = 0;
37400 while (i < bio_slab_nr) {
37401- struct bio_slab *bslab = &bio_slabs[i];
37402+ bslab = &bio_slabs[i];
37403
37404 if (!bslab->slab && entry == -1)
37405 entry = i;
37406@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
37407 const int read = bio_data_dir(bio) == READ;
37408 struct bio_map_data *bmd = bio->bi_private;
37409 int i;
37410- char *p = bmd->sgvecs[0].iov_base;
37411+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
37412
37413 __bio_for_each_segment(bvec, bio, i, 0) {
37414 char *addr = page_address(bvec->bv_page);
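The fs/bio.c hunk replaces 'struct bio_slab *bslab = &bio_slabs[i];' with a plain assignment: the '+' line assigns without a declaration, so the loop now writes the bslab declared earlier in the function instead of a block-local copy that shadowed it, a pattern that easily hides bugs (the btrfs ctree.c hunk below removes a shadowing disk_key declaration for the same reason). A two-line illustration of the shadowing pitfall:

#include <stdio.h>

int main(void)
{
        int entry = -1;
        int values[3] = { 7, 8, 9 };

        for (int i = 0; i < 3; i++) {
                int entry = values[i];   /* shadows the outer 'entry' */
                (void)entry;
        }

        /* Still -1: every assignment above hit the inner variable. */
        printf("entry = %d\n", entry);
        return 0;
}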
37415diff -urNp linux-2.6.32.42/fs/block_dev.c linux-2.6.32.42/fs/block_dev.c
37416--- linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
37417+++ linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
37418@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
37419 else if (bdev->bd_contains == bdev)
37420 res = 0; /* is a whole device which isn't held */
37421
37422- else if (bdev->bd_contains->bd_holder == bd_claim)
37423+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
37424 res = 0; /* is a partition of a device that is being partitioned */
37425 else if (bdev->bd_contains->bd_holder != NULL)
37426 res = -EBUSY; /* is a partition of a held device */
37427diff -urNp linux-2.6.32.42/fs/btrfs/ctree.c linux-2.6.32.42/fs/btrfs/ctree.c
37428--- linux-2.6.32.42/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
37429+++ linux-2.6.32.42/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
37430@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
37431 free_extent_buffer(buf);
37432 add_root_to_dirty_list(root);
37433 } else {
37434- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37435- parent_start = parent->start;
37436- else
37437+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37438+ if (parent)
37439+ parent_start = parent->start;
37440+ else
37441+ parent_start = 0;
37442+ } else
37443 parent_start = 0;
37444
37445 WARN_ON(trans->transid != btrfs_header_generation(parent));
37446@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
37447
37448 ret = 0;
37449 if (slot == 0) {
37450- struct btrfs_disk_key disk_key;
37451 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
37452 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
37453 }
37454diff -urNp linux-2.6.32.42/fs/btrfs/disk-io.c linux-2.6.32.42/fs/btrfs/disk-io.c
37455--- linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
37456+++ linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
37457@@ -39,7 +39,7 @@
37458 #include "tree-log.h"
37459 #include "free-space-cache.h"
37460
37461-static struct extent_io_ops btree_extent_io_ops;
37462+static const struct extent_io_ops btree_extent_io_ops;
37463 static void end_workqueue_fn(struct btrfs_work *work);
37464 static void free_fs_root(struct btrfs_root *root);
37465
37466@@ -2607,7 +2607,7 @@ out:
37467 return 0;
37468 }
37469
37470-static struct extent_io_ops btree_extent_io_ops = {
37471+static const struct extent_io_ops btree_extent_io_ops = {
37472 .write_cache_pages_lock_hook = btree_lock_page_hook,
37473 .readpage_end_io_hook = btree_readpage_end_io_hook,
37474 .submit_bio_hook = btree_submit_bio_hook,
37475diff -urNp linux-2.6.32.42/fs/btrfs/extent_io.h linux-2.6.32.42/fs/btrfs/extent_io.h
37476--- linux-2.6.32.42/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
37477+++ linux-2.6.32.42/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
37478@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
37479 struct bio *bio, int mirror_num,
37480 unsigned long bio_flags);
37481 struct extent_io_ops {
37482- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
37483+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
37484 u64 start, u64 end, int *page_started,
37485 unsigned long *nr_written);
37486- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
37487- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
37488+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
37489+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
37490 extent_submit_bio_hook_t *submit_bio_hook;
37491- int (*merge_bio_hook)(struct page *page, unsigned long offset,
37492+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
37493 size_t size, struct bio *bio,
37494 unsigned long bio_flags);
37495- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
37496- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
37497+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
37498+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
37499 u64 start, u64 end,
37500 struct extent_state *state);
37501- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
37502+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
37503 u64 start, u64 end,
37504 struct extent_state *state);
37505- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37506+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37507 struct extent_state *state);
37508- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37509+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37510 struct extent_state *state, int uptodate);
37511- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
37512+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
37513 unsigned long old, unsigned long bits);
37514- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
37515+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
37516 unsigned long bits);
37517- int (*merge_extent_hook)(struct inode *inode,
37518+ int (* const merge_extent_hook)(struct inode *inode,
37519 struct extent_state *new,
37520 struct extent_state *other);
37521- int (*split_extent_hook)(struct inode *inode,
37522+ int (* const split_extent_hook)(struct inode *inode,
37523 struct extent_state *orig, u64 split);
37524- int (*write_cache_pages_lock_hook)(struct page *page);
37525+ int (* const write_cache_pages_lock_hook)(struct page *page);
37526 };
37527
37528 struct extent_io_tree {
37529@@ -88,7 +88,7 @@ struct extent_io_tree {
37530 u64 dirty_bytes;
37531 spinlock_t lock;
37532 spinlock_t buffer_lock;
37533- struct extent_io_ops *ops;
37534+ const struct extent_io_ops *ops;
37535 };
37536
37537 struct extent_state {
37538diff -urNp linux-2.6.32.42/fs/btrfs/extent-tree.c linux-2.6.32.42/fs/btrfs/extent-tree.c
37539--- linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
37540+++ linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
37541@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
37542 u64 group_start = group->key.objectid;
37543 new_extents = kmalloc(sizeof(*new_extents),
37544 GFP_NOFS);
37545+ if (!new_extents) {
37546+ ret = -ENOMEM;
37547+ goto out;
37548+ }
37549 nr_extents = 1;
37550 ret = get_new_locations(reloc_inode,
37551 extent_key,
37552diff -urNp linux-2.6.32.42/fs/btrfs/free-space-cache.c linux-2.6.32.42/fs/btrfs/free-space-cache.c
37553--- linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
37554+++ linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
37555@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
37556
37557 while(1) {
37558 if (entry->bytes < bytes || entry->offset < min_start) {
37559- struct rb_node *node;
37560-
37561 node = rb_next(&entry->offset_index);
37562 if (!node)
37563 break;
37564@@ -1226,7 +1224,7 @@ again:
37565 */
37566 while (entry->bitmap || found_bitmap ||
37567 (!entry->bitmap && entry->bytes < min_bytes)) {
37568- struct rb_node *node = rb_next(&entry->offset_index);
37569+ node = rb_next(&entry->offset_index);
37570
37571 if (entry->bitmap && entry->bytes > bytes + empty_size) {
37572 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
37573diff -urNp linux-2.6.32.42/fs/btrfs/inode.c linux-2.6.32.42/fs/btrfs/inode.c
37574--- linux-2.6.32.42/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37575+++ linux-2.6.32.42/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
37576@@ -63,7 +63,7 @@ static const struct inode_operations btr
37577 static const struct address_space_operations btrfs_aops;
37578 static const struct address_space_operations btrfs_symlink_aops;
37579 static const struct file_operations btrfs_dir_file_operations;
37580-static struct extent_io_ops btrfs_extent_io_ops;
37581+static const struct extent_io_ops btrfs_extent_io_ops;
37582
37583 static struct kmem_cache *btrfs_inode_cachep;
37584 struct kmem_cache *btrfs_trans_handle_cachep;
37585@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
37586 1, 0, NULL, GFP_NOFS);
37587 while (start < end) {
37588 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
37589+ BUG_ON(!async_cow);
37590 async_cow->inode = inode;
37591 async_cow->root = root;
37592 async_cow->locked_page = locked_page;
37593@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
37594 inline_size = btrfs_file_extent_inline_item_len(leaf,
37595 btrfs_item_nr(leaf, path->slots[0]));
37596 tmp = kmalloc(inline_size, GFP_NOFS);
37597+ if (!tmp)
37598+ return -ENOMEM;
37599 ptr = btrfs_file_extent_inline_start(item);
37600
37601 read_extent_buffer(leaf, tmp, ptr, inline_size);
37602@@ -5410,7 +5413,7 @@ fail:
37603 return -ENOMEM;
37604 }
37605
37606-static int btrfs_getattr(struct vfsmount *mnt,
37607+int btrfs_getattr(struct vfsmount *mnt,
37608 struct dentry *dentry, struct kstat *stat)
37609 {
37610 struct inode *inode = dentry->d_inode;
37611@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
37612 return 0;
37613 }
37614
37615+EXPORT_SYMBOL(btrfs_getattr);
37616+
37617+dev_t get_btrfs_dev_from_inode(struct inode *inode)
37618+{
37619+ return BTRFS_I(inode)->root->anon_super.s_dev;
37620+}
37621+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37622+
37623 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37624 struct inode *new_dir, struct dentry *new_dentry)
37625 {
37626@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37627 .fsync = btrfs_sync_file,
37628 };
37629
37630-static struct extent_io_ops btrfs_extent_io_ops = {
37631+static const struct extent_io_ops btrfs_extent_io_ops = {
37632 .fill_delalloc = run_delalloc_range,
37633 .submit_bio_hook = btrfs_submit_bio_hook,
37634 .merge_bio_hook = btrfs_merge_bio_hook,
37635diff -urNp linux-2.6.32.42/fs/btrfs/relocation.c linux-2.6.32.42/fs/btrfs/relocation.c
37636--- linux-2.6.32.42/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37637+++ linux-2.6.32.42/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37638@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37639 }
37640 spin_unlock(&rc->reloc_root_tree.lock);
37641
37642- BUG_ON((struct btrfs_root *)node->data != root);
37643+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
37644
37645 if (!del) {
37646 spin_lock(&rc->reloc_root_tree.lock);
37647diff -urNp linux-2.6.32.42/fs/btrfs/sysfs.c linux-2.6.32.42/fs/btrfs/sysfs.c
37648--- linux-2.6.32.42/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37649+++ linux-2.6.32.42/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37650@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37651 complete(&root->kobj_unregister);
37652 }
37653
37654-static struct sysfs_ops btrfs_super_attr_ops = {
37655+static const struct sysfs_ops btrfs_super_attr_ops = {
37656 .show = btrfs_super_attr_show,
37657 .store = btrfs_super_attr_store,
37658 };
37659
37660-static struct sysfs_ops btrfs_root_attr_ops = {
37661+static const struct sysfs_ops btrfs_root_attr_ops = {
37662 .show = btrfs_root_attr_show,
37663 .store = btrfs_root_attr_store,
37664 };
37665diff -urNp linux-2.6.32.42/fs/buffer.c linux-2.6.32.42/fs/buffer.c
37666--- linux-2.6.32.42/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37667+++ linux-2.6.32.42/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37668@@ -25,6 +25,7 @@
37669 #include <linux/percpu.h>
37670 #include <linux/slab.h>
37671 #include <linux/capability.h>
37672+#include <linux/security.h>
37673 #include <linux/blkdev.h>
37674 #include <linux/file.h>
37675 #include <linux/quotaops.h>
37676diff -urNp linux-2.6.32.42/fs/cachefiles/bind.c linux-2.6.32.42/fs/cachefiles/bind.c
37677--- linux-2.6.32.42/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37678+++ linux-2.6.32.42/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37679@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37680 args);
37681
37682 /* start by checking things over */
37683- ASSERT(cache->fstop_percent >= 0 &&
37684- cache->fstop_percent < cache->fcull_percent &&
37685+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
37686 cache->fcull_percent < cache->frun_percent &&
37687 cache->frun_percent < 100);
37688
37689- ASSERT(cache->bstop_percent >= 0 &&
37690- cache->bstop_percent < cache->bcull_percent &&
37691+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
37692 cache->bcull_percent < cache->brun_percent &&
37693 cache->brun_percent < 100);
37694
37695diff -urNp linux-2.6.32.42/fs/cachefiles/daemon.c linux-2.6.32.42/fs/cachefiles/daemon.c
37696--- linux-2.6.32.42/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37697+++ linux-2.6.32.42/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37698@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37699 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37700 return -EIO;
37701
37702- if (datalen < 0 || datalen > PAGE_SIZE - 1)
37703+ if (datalen > PAGE_SIZE - 1)
37704 return -EOPNOTSUPP;
37705
37706 /* drag the command string into the kernel so we can parse it */
37707@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37708 if (args[0] != '%' || args[1] != '\0')
37709 return -EINVAL;
37710
37711- if (fstop < 0 || fstop >= cache->fcull_percent)
37712+ if (fstop >= cache->fcull_percent)
37713 return cachefiles_daemon_range_error(cache, args);
37714
37715 cache->fstop_percent = fstop;
37716@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37717 if (args[0] != '%' || args[1] != '\0')
37718 return -EINVAL;
37719
37720- if (bstop < 0 || bstop >= cache->bcull_percent)
37721+ if (bstop >= cache->bcull_percent)
37722 return cachefiles_daemon_range_error(cache, args);
37723
37724 cache->bstop_percent = bstop;
37725diff -urNp linux-2.6.32.42/fs/cachefiles/internal.h linux-2.6.32.42/fs/cachefiles/internal.h
37726--- linux-2.6.32.42/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37727+++ linux-2.6.32.42/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37728@@ -56,7 +56,7 @@ struct cachefiles_cache {
37729 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37730 struct rb_root active_nodes; /* active nodes (can't be culled) */
37731 rwlock_t active_lock; /* lock for active_nodes */
37732- atomic_t gravecounter; /* graveyard uniquifier */
37733+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37734 unsigned frun_percent; /* when to stop culling (% files) */
37735 unsigned fcull_percent; /* when to start culling (% files) */
37736 unsigned fstop_percent; /* when to stop allocating (% files) */
37737@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37738 * proc.c
37739 */
37740 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37741-extern atomic_t cachefiles_lookup_histogram[HZ];
37742-extern atomic_t cachefiles_mkdir_histogram[HZ];
37743-extern atomic_t cachefiles_create_histogram[HZ];
37744+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37745+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37746+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37747
37748 extern int __init cachefiles_proc_init(void);
37749 extern void cachefiles_proc_cleanup(void);
37750 static inline
37751-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37752+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37753 {
37754 unsigned long jif = jiffies - start_jif;
37755 if (jif >= HZ)
37756 jif = HZ - 1;
37757- atomic_inc(&histogram[jif]);
37758+ atomic_inc_unchecked(&histogram[jif]);
37759 }
37760
37761 #else
37762diff -urNp linux-2.6.32.42/fs/cachefiles/namei.c linux-2.6.32.42/fs/cachefiles/namei.c
37763--- linux-2.6.32.42/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37764+++ linux-2.6.32.42/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37765@@ -250,7 +250,7 @@ try_again:
37766 /* first step is to make up a grave dentry in the graveyard */
37767 sprintf(nbuffer, "%08x%08x",
37768 (uint32_t) get_seconds(),
37769- (uint32_t) atomic_inc_return(&cache->gravecounter));
37770+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37771
37772 /* do the multiway lock magic */
37773 trap = lock_rename(cache->graveyard, dir);
37774diff -urNp linux-2.6.32.42/fs/cachefiles/proc.c linux-2.6.32.42/fs/cachefiles/proc.c
37775--- linux-2.6.32.42/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37776+++ linux-2.6.32.42/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37777@@ -14,9 +14,9 @@
37778 #include <linux/seq_file.h>
37779 #include "internal.h"
37780
37781-atomic_t cachefiles_lookup_histogram[HZ];
37782-atomic_t cachefiles_mkdir_histogram[HZ];
37783-atomic_t cachefiles_create_histogram[HZ];
37784+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37785+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37786+atomic_unchecked_t cachefiles_create_histogram[HZ];
37787
37788 /*
37789 * display the latency histogram
37790@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37791 return 0;
37792 default:
37793 index = (unsigned long) v - 3;
37794- x = atomic_read(&cachefiles_lookup_histogram[index]);
37795- y = atomic_read(&cachefiles_mkdir_histogram[index]);
37796- z = atomic_read(&cachefiles_create_histogram[index]);
37797+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37798+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37799+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37800 if (x == 0 && y == 0 && z == 0)
37801 return 0;
37802
37803diff -urNp linux-2.6.32.42/fs/cachefiles/rdwr.c linux-2.6.32.42/fs/cachefiles/rdwr.c
37804--- linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37805+++ linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37806@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37807 old_fs = get_fs();
37808 set_fs(KERNEL_DS);
37809 ret = file->f_op->write(
37810- file, (const void __user *) data, len, &pos);
37811+ file, (__force const void __user *) data, len, &pos);
37812 set_fs(old_fs);
37813 kunmap(page);
37814 if (ret != len)
37815diff -urNp linux-2.6.32.42/fs/cifs/cifs_debug.c linux-2.6.32.42/fs/cifs/cifs_debug.c
37816--- linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37817+++ linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37818@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37819 tcon = list_entry(tmp3,
37820 struct cifsTconInfo,
37821 tcon_list);
37822- atomic_set(&tcon->num_smbs_sent, 0);
37823- atomic_set(&tcon->num_writes, 0);
37824- atomic_set(&tcon->num_reads, 0);
37825- atomic_set(&tcon->num_oplock_brks, 0);
37826- atomic_set(&tcon->num_opens, 0);
37827- atomic_set(&tcon->num_posixopens, 0);
37828- atomic_set(&tcon->num_posixmkdirs, 0);
37829- atomic_set(&tcon->num_closes, 0);
37830- atomic_set(&tcon->num_deletes, 0);
37831- atomic_set(&tcon->num_mkdirs, 0);
37832- atomic_set(&tcon->num_rmdirs, 0);
37833- atomic_set(&tcon->num_renames, 0);
37834- atomic_set(&tcon->num_t2renames, 0);
37835- atomic_set(&tcon->num_ffirst, 0);
37836- atomic_set(&tcon->num_fnext, 0);
37837- atomic_set(&tcon->num_fclose, 0);
37838- atomic_set(&tcon->num_hardlinks, 0);
37839- atomic_set(&tcon->num_symlinks, 0);
37840- atomic_set(&tcon->num_locks, 0);
37841+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37842+ atomic_set_unchecked(&tcon->num_writes, 0);
37843+ atomic_set_unchecked(&tcon->num_reads, 0);
37844+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37845+ atomic_set_unchecked(&tcon->num_opens, 0);
37846+ atomic_set_unchecked(&tcon->num_posixopens, 0);
37847+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37848+ atomic_set_unchecked(&tcon->num_closes, 0);
37849+ atomic_set_unchecked(&tcon->num_deletes, 0);
37850+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
37851+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
37852+ atomic_set_unchecked(&tcon->num_renames, 0);
37853+ atomic_set_unchecked(&tcon->num_t2renames, 0);
37854+ atomic_set_unchecked(&tcon->num_ffirst, 0);
37855+ atomic_set_unchecked(&tcon->num_fnext, 0);
37856+ atomic_set_unchecked(&tcon->num_fclose, 0);
37857+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
37858+ atomic_set_unchecked(&tcon->num_symlinks, 0);
37859+ atomic_set_unchecked(&tcon->num_locks, 0);
37860 }
37861 }
37862 }
37863@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37864 if (tcon->need_reconnect)
37865 seq_puts(m, "\tDISCONNECTED ");
37866 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37867- atomic_read(&tcon->num_smbs_sent),
37868- atomic_read(&tcon->num_oplock_brks));
37869+ atomic_read_unchecked(&tcon->num_smbs_sent),
37870+ atomic_read_unchecked(&tcon->num_oplock_brks));
37871 seq_printf(m, "\nReads: %d Bytes: %lld",
37872- atomic_read(&tcon->num_reads),
37873+ atomic_read_unchecked(&tcon->num_reads),
37874 (long long)(tcon->bytes_read));
37875 seq_printf(m, "\nWrites: %d Bytes: %lld",
37876- atomic_read(&tcon->num_writes),
37877+ atomic_read_unchecked(&tcon->num_writes),
37878 (long long)(tcon->bytes_written));
37879 seq_printf(m, "\nFlushes: %d",
37880- atomic_read(&tcon->num_flushes));
37881+ atomic_read_unchecked(&tcon->num_flushes));
37882 seq_printf(m, "\nLocks: %d HardLinks: %d "
37883 "Symlinks: %d",
37884- atomic_read(&tcon->num_locks),
37885- atomic_read(&tcon->num_hardlinks),
37886- atomic_read(&tcon->num_symlinks));
37887+ atomic_read_unchecked(&tcon->num_locks),
37888+ atomic_read_unchecked(&tcon->num_hardlinks),
37889+ atomic_read_unchecked(&tcon->num_symlinks));
37890 seq_printf(m, "\nOpens: %d Closes: %d "
37891 "Deletes: %d",
37892- atomic_read(&tcon->num_opens),
37893- atomic_read(&tcon->num_closes),
37894- atomic_read(&tcon->num_deletes));
37895+ atomic_read_unchecked(&tcon->num_opens),
37896+ atomic_read_unchecked(&tcon->num_closes),
37897+ atomic_read_unchecked(&tcon->num_deletes));
37898 seq_printf(m, "\nPosix Opens: %d "
37899 "Posix Mkdirs: %d",
37900- atomic_read(&tcon->num_posixopens),
37901- atomic_read(&tcon->num_posixmkdirs));
37902+ atomic_read_unchecked(&tcon->num_posixopens),
37903+ atomic_read_unchecked(&tcon->num_posixmkdirs));
37904 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37905- atomic_read(&tcon->num_mkdirs),
37906- atomic_read(&tcon->num_rmdirs));
37907+ atomic_read_unchecked(&tcon->num_mkdirs),
37908+ atomic_read_unchecked(&tcon->num_rmdirs));
37909 seq_printf(m, "\nRenames: %d T2 Renames %d",
37910- atomic_read(&tcon->num_renames),
37911- atomic_read(&tcon->num_t2renames));
37912+ atomic_read_unchecked(&tcon->num_renames),
37913+ atomic_read_unchecked(&tcon->num_t2renames));
37914 seq_printf(m, "\nFindFirst: %d FNext %d "
37915 "FClose %d",
37916- atomic_read(&tcon->num_ffirst),
37917- atomic_read(&tcon->num_fnext),
37918- atomic_read(&tcon->num_fclose));
37919+ atomic_read_unchecked(&tcon->num_ffirst),
37920+ atomic_read_unchecked(&tcon->num_fnext),
37921+ atomic_read_unchecked(&tcon->num_fclose));
37922 }
37923 }
37924 }
37925diff -urNp linux-2.6.32.42/fs/cifs/cifsglob.h linux-2.6.32.42/fs/cifs/cifsglob.h
37926--- linux-2.6.32.42/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37927+++ linux-2.6.32.42/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37928@@ -252,28 +252,28 @@ struct cifsTconInfo {
37929 __u16 Flags; /* optional support bits */
37930 enum statusEnum tidStatus;
37931 #ifdef CONFIG_CIFS_STATS
37932- atomic_t num_smbs_sent;
37933- atomic_t num_writes;
37934- atomic_t num_reads;
37935- atomic_t num_flushes;
37936- atomic_t num_oplock_brks;
37937- atomic_t num_opens;
37938- atomic_t num_closes;
37939- atomic_t num_deletes;
37940- atomic_t num_mkdirs;
37941- atomic_t num_posixopens;
37942- atomic_t num_posixmkdirs;
37943- atomic_t num_rmdirs;
37944- atomic_t num_renames;
37945- atomic_t num_t2renames;
37946- atomic_t num_ffirst;
37947- atomic_t num_fnext;
37948- atomic_t num_fclose;
37949- atomic_t num_hardlinks;
37950- atomic_t num_symlinks;
37951- atomic_t num_locks;
37952- atomic_t num_acl_get;
37953- atomic_t num_acl_set;
37954+ atomic_unchecked_t num_smbs_sent;
37955+ atomic_unchecked_t num_writes;
37956+ atomic_unchecked_t num_reads;
37957+ atomic_unchecked_t num_flushes;
37958+ atomic_unchecked_t num_oplock_brks;
37959+ atomic_unchecked_t num_opens;
37960+ atomic_unchecked_t num_closes;
37961+ atomic_unchecked_t num_deletes;
37962+ atomic_unchecked_t num_mkdirs;
37963+ atomic_unchecked_t num_posixopens;
37964+ atomic_unchecked_t num_posixmkdirs;
37965+ atomic_unchecked_t num_rmdirs;
37966+ atomic_unchecked_t num_renames;
37967+ atomic_unchecked_t num_t2renames;
37968+ atomic_unchecked_t num_ffirst;
37969+ atomic_unchecked_t num_fnext;
37970+ atomic_unchecked_t num_fclose;
37971+ atomic_unchecked_t num_hardlinks;
37972+ atomic_unchecked_t num_symlinks;
37973+ atomic_unchecked_t num_locks;
37974+ atomic_unchecked_t num_acl_get;
37975+ atomic_unchecked_t num_acl_set;
37976 #ifdef CONFIG_CIFS_STATS2
37977 unsigned long long time_writes;
37978 unsigned long long time_reads;
37979@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37980 }
37981
37982 #ifdef CONFIG_CIFS_STATS
37983-#define cifs_stats_inc atomic_inc
37984+#define cifs_stats_inc atomic_inc_unchecked
37985
37986 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37987 unsigned int bytes)
37988diff -urNp linux-2.6.32.42/fs/cifs/link.c linux-2.6.32.42/fs/cifs/link.c
37989--- linux-2.6.32.42/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37990+++ linux-2.6.32.42/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37991@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37992
37993 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37994 {
37995- char *p = nd_get_link(nd);
37996+ const char *p = nd_get_link(nd);
37997 if (!IS_ERR(p))
37998 kfree(p);
37999 }
38000diff -urNp linux-2.6.32.42/fs/coda/cache.c linux-2.6.32.42/fs/coda/cache.c
38001--- linux-2.6.32.42/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
38002+++ linux-2.6.32.42/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
38003@@ -24,14 +24,14 @@
38004 #include <linux/coda_fs_i.h>
38005 #include <linux/coda_cache.h>
38006
38007-static atomic_t permission_epoch = ATOMIC_INIT(0);
38008+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38009
38010 /* replace or extend an acl cache hit */
38011 void coda_cache_enter(struct inode *inode, int mask)
38012 {
38013 struct coda_inode_info *cii = ITOC(inode);
38014
38015- cii->c_cached_epoch = atomic_read(&permission_epoch);
38016+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38017 if (cii->c_uid != current_fsuid()) {
38018 cii->c_uid = current_fsuid();
38019 cii->c_cached_perm = mask;
38020@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
38021 void coda_cache_clear_inode(struct inode *inode)
38022 {
38023 struct coda_inode_info *cii = ITOC(inode);
38024- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38025+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38026 }
38027
38028 /* remove all acl caches */
38029 void coda_cache_clear_all(struct super_block *sb)
38030 {
38031- atomic_inc(&permission_epoch);
38032+ atomic_inc_unchecked(&permission_epoch);
38033 }
38034
38035
38036@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
38037
38038 hit = (mask & cii->c_cached_perm) == mask &&
38039 cii->c_uid == current_fsuid() &&
38040- cii->c_cached_epoch == atomic_read(&permission_epoch);
38041+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38042
38043 return hit;
38044 }
38045diff -urNp linux-2.6.32.42/fs/compat_binfmt_elf.c linux-2.6.32.42/fs/compat_binfmt_elf.c
38046--- linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38047+++ linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
38048@@ -29,10 +29,12 @@
38049 #undef elfhdr
38050 #undef elf_phdr
38051 #undef elf_note
38052+#undef elf_dyn
38053 #undef elf_addr_t
38054 #define elfhdr elf32_hdr
38055 #define elf_phdr elf32_phdr
38056 #define elf_note elf32_note
38057+#define elf_dyn Elf32_Dyn
38058 #define elf_addr_t Elf32_Addr
38059
38060 /*
38061diff -urNp linux-2.6.32.42/fs/compat.c linux-2.6.32.42/fs/compat.c
38062--- linux-2.6.32.42/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
38063+++ linux-2.6.32.42/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
38064@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
38065
38066 struct compat_readdir_callback {
38067 struct compat_old_linux_dirent __user *dirent;
38068+ struct file * file;
38069 int result;
38070 };
38071
38072@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
38073 buf->result = -EOVERFLOW;
38074 return -EOVERFLOW;
38075 }
38076+
38077+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38078+ return 0;
38079+
38080 buf->result++;
38081 dirent = buf->dirent;
38082 if (!access_ok(VERIFY_WRITE, dirent,
38083@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
38084
38085 buf.result = 0;
38086 buf.dirent = dirent;
38087+ buf.file = file;
38088
38089 error = vfs_readdir(file, compat_fillonedir, &buf);
38090 if (buf.result)
38091@@ -899,6 +905,7 @@ struct compat_linux_dirent {
38092 struct compat_getdents_callback {
38093 struct compat_linux_dirent __user *current_dir;
38094 struct compat_linux_dirent __user *previous;
38095+ struct file * file;
38096 int count;
38097 int error;
38098 };
38099@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
38100 buf->error = -EOVERFLOW;
38101 return -EOVERFLOW;
38102 }
38103+
38104+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38105+ return 0;
38106+
38107 dirent = buf->previous;
38108 if (dirent) {
38109 if (__put_user(offset, &dirent->d_off))
38110@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
38111 buf.previous = NULL;
38112 buf.count = count;
38113 buf.error = 0;
38114+ buf.file = file;
38115
38116 error = vfs_readdir(file, compat_filldir, &buf);
38117 if (error >= 0)
38118@@ -987,6 +999,7 @@ out:
38119 struct compat_getdents_callback64 {
38120 struct linux_dirent64 __user *current_dir;
38121 struct linux_dirent64 __user *previous;
38122+ struct file * file;
38123 int count;
38124 int error;
38125 };
38126@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
38127 buf->error = -EINVAL; /* only used if we fail.. */
38128 if (reclen > buf->count)
38129 return -EINVAL;
38130+
38131+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38132+ return 0;
38133+
38134 dirent = buf->previous;
38135
38136 if (dirent) {
38137@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
38138 buf.previous = NULL;
38139 buf.count = count;
38140 buf.error = 0;
38141+ buf.file = file;
38142
38143 error = vfs_readdir(file, compat_filldir64, &buf);
38144 if (error >= 0)
38145@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
38146 * verify all the pointers
38147 */
38148 ret = -EINVAL;
38149- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
38150+ if (nr_segs > UIO_MAXIOV)
38151 goto out;
38152 if (!file->f_op)
38153 goto out;
38154@@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
38155 compat_uptr_t __user *envp,
38156 struct pt_regs * regs)
38157 {
38158+#ifdef CONFIG_GRKERNSEC
38159+ struct file *old_exec_file;
38160+ struct acl_subject_label *old_acl;
38161+ struct rlimit old_rlim[RLIM_NLIMITS];
38162+#endif
38163 struct linux_binprm *bprm;
38164 struct file *file;
38165 struct files_struct *displaced;
38166@@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
38167 bprm->filename = filename;
38168 bprm->interp = filename;
38169
38170+ if (gr_process_user_ban()) {
38171+ retval = -EPERM;
38172+ goto out_file;
38173+ }
38174+
38175+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38176+ retval = -EAGAIN;
38177+ if (gr_handle_nproc())
38178+ goto out_file;
38179+ retval = -EACCES;
38180+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
38181+ goto out_file;
38182+
38183 retval = bprm_mm_init(bprm);
38184 if (retval)
38185 goto out_file;
38186@@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
38187 if (retval < 0)
38188 goto out;
38189
38190+ if (!gr_tpe_allow(file)) {
38191+ retval = -EACCES;
38192+ goto out;
38193+ }
38194+
38195+ if (gr_check_crash_exec(file)) {
38196+ retval = -EACCES;
38197+ goto out;
38198+ }
38199+
38200+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38201+
38202+ gr_handle_exec_args_compat(bprm, argv);
38203+
38204+#ifdef CONFIG_GRKERNSEC
38205+ old_acl = current->acl;
38206+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38207+ old_exec_file = current->exec_file;
38208+ get_file(file);
38209+ current->exec_file = file;
38210+#endif
38211+
38212+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38213+ bprm->unsafe & LSM_UNSAFE_SHARE);
38214+ if (retval < 0)
38215+ goto out_fail;
38216+
38217 retval = search_binary_handler(bprm, regs);
38218 if (retval < 0)
38219- goto out;
38220+ goto out_fail;
38221+#ifdef CONFIG_GRKERNSEC
38222+ if (old_exec_file)
38223+ fput(old_exec_file);
38224+#endif
38225
38226 /* execve succeeded */
38227 current->fs->in_exec = 0;
38228@@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
38229 put_files_struct(displaced);
38230 return retval;
38231
38232+out_fail:
38233+#ifdef CONFIG_GRKERNSEC
38234+ current->acl = old_acl;
38235+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38236+ fput(current->exec_file);
38237+ current->exec_file = old_exec_file;
38238+#endif
38239+
38240 out:
38241 if (bprm->mm) {
38242 acct_arg_size(bprm, 0);
38243@@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
38244 struct fdtable *fdt;
38245 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38246
38247+ pax_track_stack();
38248+
38249 if (n < 0)
38250 goto out_nofds;
38251
38252diff -urNp linux-2.6.32.42/fs/compat_ioctl.c linux-2.6.32.42/fs/compat_ioctl.c
38253--- linux-2.6.32.42/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
38254+++ linux-2.6.32.42/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
38255@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
38256 up = (struct compat_video_spu_palette __user *) arg;
38257 err = get_user(palp, &up->palette);
38258 err |= get_user(length, &up->length);
38259+ if (err)
38260+ return -EFAULT;
38261
38262 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38263 err = put_user(compat_ptr(palp), &up_native->palette);
38264diff -urNp linux-2.6.32.42/fs/configfs/dir.c linux-2.6.32.42/fs/configfs/dir.c
38265--- linux-2.6.32.42/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
38266+++ linux-2.6.32.42/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
38267@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
38268 }
38269 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38270 struct configfs_dirent *next;
38271- const char * name;
38272+ const unsigned char * name;
38273+ char d_name[sizeof(next->s_dentry->d_iname)];
38274 int len;
38275
38276 next = list_entry(p, struct configfs_dirent,
38277@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
38278 continue;
38279
38280 name = configfs_get_name(next);
38281- len = strlen(name);
38282+ if (next->s_dentry && name == next->s_dentry->d_iname) {
38283+ len = next->s_dentry->d_name.len;
38284+ memcpy(d_name, name, len);
38285+ name = d_name;
38286+ } else
38287+ len = strlen(name);
38288 if (next->s_dentry)
38289 ino = next->s_dentry->d_inode->i_ino;
38290 else
38291diff -urNp linux-2.6.32.42/fs/dcache.c linux-2.6.32.42/fs/dcache.c
38292--- linux-2.6.32.42/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
38293+++ linux-2.6.32.42/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
38294@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
38295
38296 static struct kmem_cache *dentry_cache __read_mostly;
38297
38298-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
38299-
38300 /*
38301 * This is the single most critical data structure when it comes
38302 * to the dcache: the hashtable for lookups. Somebody should try
38303@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
38304 mempages -= reserve;
38305
38306 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38307- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38308+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38309
38310 dcache_init();
38311 inode_init();
38312diff -urNp linux-2.6.32.42/fs/dlm/lockspace.c linux-2.6.32.42/fs/dlm/lockspace.c
38313--- linux-2.6.32.42/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
38314+++ linux-2.6.32.42/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
38315@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
38316 kfree(ls);
38317 }
38318
38319-static struct sysfs_ops dlm_attr_ops = {
38320+static const struct sysfs_ops dlm_attr_ops = {
38321 .show = dlm_attr_show,
38322 .store = dlm_attr_store,
38323 };
38324diff -urNp linux-2.6.32.42/fs/ecryptfs/inode.c linux-2.6.32.42/fs/ecryptfs/inode.c
38325--- linux-2.6.32.42/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38326+++ linux-2.6.32.42/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
38327@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
38328 old_fs = get_fs();
38329 set_fs(get_ds());
38330 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38331- (char __user *)lower_buf,
38332+ (__force char __user *)lower_buf,
38333 lower_bufsiz);
38334 set_fs(old_fs);
38335 if (rc < 0)
38336@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
38337 }
38338 old_fs = get_fs();
38339 set_fs(get_ds());
38340- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38341+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38342 set_fs(old_fs);
38343 if (rc < 0)
38344 goto out_free;
38345diff -urNp linux-2.6.32.42/fs/exec.c linux-2.6.32.42/fs/exec.c
38346--- linux-2.6.32.42/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
38347+++ linux-2.6.32.42/fs/exec.c 2011-07-06 19:53:33.000000000 -0400
38348@@ -56,12 +56,24 @@
38349 #include <linux/fsnotify.h>
38350 #include <linux/fs_struct.h>
38351 #include <linux/pipe_fs_i.h>
38352+#include <linux/random.h>
38353+#include <linux/seq_file.h>
38354+
38355+#ifdef CONFIG_PAX_REFCOUNT
38356+#include <linux/kallsyms.h>
38357+#include <linux/kdebug.h>
38358+#endif
38359
38360 #include <asm/uaccess.h>
38361 #include <asm/mmu_context.h>
38362 #include <asm/tlb.h>
38363 #include "internal.h"
38364
38365+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38366+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38367+EXPORT_SYMBOL(pax_set_initial_flags_func);
38368+#endif
38369+
38370 int core_uses_pid;
38371 char core_pattern[CORENAME_MAX_SIZE] = "core";
38372 unsigned int core_pipe_limit;
38373@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38374 goto out;
38375
38376 file = do_filp_open(AT_FDCWD, tmp,
38377- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38378+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38379 MAY_READ | MAY_EXEC | MAY_OPEN);
38380 putname(tmp);
38381 error = PTR_ERR(file);
38382@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
38383 int write)
38384 {
38385 struct page *page;
38386- int ret;
38387
38388-#ifdef CONFIG_STACK_GROWSUP
38389- if (write) {
38390- ret = expand_stack_downwards(bprm->vma, pos);
38391- if (ret < 0)
38392- return NULL;
38393- }
38394-#endif
38395- ret = get_user_pages(current, bprm->mm, pos,
38396- 1, write, 1, &page, NULL);
38397- if (ret <= 0)
38398+ if (0 > expand_stack_downwards(bprm->vma, pos))
38399+ return NULL;
38400+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38401 return NULL;
38402
38403 if (write) {
38404@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
38405 vma->vm_end = STACK_TOP_MAX;
38406 vma->vm_start = vma->vm_end - PAGE_SIZE;
38407 vma->vm_flags = VM_STACK_FLAGS;
38408+
38409+#ifdef CONFIG_PAX_SEGMEXEC
38410+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38411+#endif
38412+
38413 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38414
38415 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
38416@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
38417 mm->stack_vm = mm->total_vm = 1;
38418 up_write(&mm->mmap_sem);
38419 bprm->p = vma->vm_end - sizeof(void *);
38420+
38421+#ifdef CONFIG_PAX_RANDUSTACK
38422+ if (randomize_va_space)
38423+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38424+#endif
38425+
38426 return 0;
38427 err:
38428 up_write(&mm->mmap_sem);
38429@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
38430 int r;
38431 mm_segment_t oldfs = get_fs();
38432 set_fs(KERNEL_DS);
38433- r = copy_strings(argc, (char __user * __user *)argv, bprm);
38434+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
38435 set_fs(oldfs);
38436 return r;
38437 }
38438@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
38439 unsigned long new_end = old_end - shift;
38440 struct mmu_gather *tlb;
38441
38442- BUG_ON(new_start > new_end);
38443+ if (new_start >= new_end || new_start < mmap_min_addr)
38444+ return -ENOMEM;
38445
38446 /*
38447 * ensure there are no vmas between where we want to go
38448@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
38449 if (vma != find_vma(mm, new_start))
38450 return -EFAULT;
38451
38452+#ifdef CONFIG_PAX_SEGMEXEC
38453+ BUG_ON(pax_find_mirror_vma(vma));
38454+#endif
38455+
38456 /*
38457 * cover the whole range: [new_start, old_end)
38458 */
38459@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
38460 stack_top = arch_align_stack(stack_top);
38461 stack_top = PAGE_ALIGN(stack_top);
38462
38463- if (unlikely(stack_top < mmap_min_addr) ||
38464- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38465- return -ENOMEM;
38466-
38467 stack_shift = vma->vm_end - stack_top;
38468
38469 bprm->p -= stack_shift;
38470@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
38471 bprm->exec -= stack_shift;
38472
38473 down_write(&mm->mmap_sem);
38474+
38475+ /* Move stack pages down in memory. */
38476+ if (stack_shift) {
38477+ ret = shift_arg_pages(vma, stack_shift);
38478+ if (ret)
38479+ goto out_unlock;
38480+ }
38481+
38482 vm_flags = VM_STACK_FLAGS;
38483
38484 /*
38485@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
38486 vm_flags &= ~VM_EXEC;
38487 vm_flags |= mm->def_flags;
38488
38489+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38490+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38491+ vm_flags &= ~VM_EXEC;
38492+
38493+#ifdef CONFIG_PAX_MPROTECT
38494+ if (mm->pax_flags & MF_PAX_MPROTECT)
38495+ vm_flags &= ~VM_MAYEXEC;
38496+#endif
38497+
38498+ }
38499+#endif
38500+
38501 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
38502 vm_flags);
38503 if (ret)
38504 goto out_unlock;
38505 BUG_ON(prev != vma);
38506
38507- /* Move stack pages down in memory. */
38508- if (stack_shift) {
38509- ret = shift_arg_pages(vma, stack_shift);
38510- if (ret)
38511- goto out_unlock;
38512- }
38513-
38514 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
38515 stack_size = vma->vm_end - vma->vm_start;
38516 /*
38517@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
38518 int err;
38519
38520 file = do_filp_open(AT_FDCWD, name,
38521- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38522+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38523 MAY_EXEC | MAY_OPEN);
38524 if (IS_ERR(file))
38525 goto out;
38526@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
38527 old_fs = get_fs();
38528 set_fs(get_ds());
38529 /* The cast to a user pointer is valid due to the set_fs() */
38530- result = vfs_read(file, (void __user *)addr, count, &pos);
38531+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
38532 set_fs(old_fs);
38533 return result;
38534 }
38535@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
38536 }
38537 rcu_read_unlock();
38538
38539- if (p->fs->users > n_fs) {
38540+ if (atomic_read(&p->fs->users) > n_fs) {
38541 bprm->unsafe |= LSM_UNSAFE_SHARE;
38542 } else {
38543 res = -EAGAIN;
38544@@ -1347,6 +1376,11 @@ int do_execve(char * filename,
38545 char __user *__user *envp,
38546 struct pt_regs * regs)
38547 {
38548+#ifdef CONFIG_GRKERNSEC
38549+ struct file *old_exec_file;
38550+ struct acl_subject_label *old_acl;
38551+ struct rlimit old_rlim[RLIM_NLIMITS];
38552+#endif
38553 struct linux_binprm *bprm;
38554 struct file *file;
38555 struct files_struct *displaced;
38556@@ -1383,6 +1417,23 @@ int do_execve(char * filename,
38557 bprm->filename = filename;
38558 bprm->interp = filename;
38559
38560+ if (gr_process_user_ban()) {
38561+ retval = -EPERM;
38562+ goto out_file;
38563+ }
38564+
38565+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38566+
38567+ if (gr_handle_nproc()) {
38568+ retval = -EAGAIN;
38569+ goto out_file;
38570+ }
38571+
38572+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38573+ retval = -EACCES;
38574+ goto out_file;
38575+ }
38576+
38577 retval = bprm_mm_init(bprm);
38578 if (retval)
38579 goto out_file;
38580@@ -1412,10 +1463,41 @@ int do_execve(char * filename,
38581 if (retval < 0)
38582 goto out;
38583
38584+ if (!gr_tpe_allow(file)) {
38585+ retval = -EACCES;
38586+ goto out;
38587+ }
38588+
38589+ if (gr_check_crash_exec(file)) {
38590+ retval = -EACCES;
38591+ goto out;
38592+ }
38593+
38594+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38595+
38596+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
38597+
38598+#ifdef CONFIG_GRKERNSEC
38599+ old_acl = current->acl;
38600+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38601+ old_exec_file = current->exec_file;
38602+ get_file(file);
38603+ current->exec_file = file;
38604+#endif
38605+
38606+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38607+ bprm->unsafe & LSM_UNSAFE_SHARE);
38608+ if (retval < 0)
38609+ goto out_fail;
38610+
38611 current->flags &= ~PF_KTHREAD;
38612 retval = search_binary_handler(bprm,regs);
38613 if (retval < 0)
38614- goto out;
38615+ goto out_fail;
38616+#ifdef CONFIG_GRKERNSEC
38617+ if (old_exec_file)
38618+ fput(old_exec_file);
38619+#endif
38620
38621 /* execve succeeded */
38622 current->fs->in_exec = 0;
38623@@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38624 put_files_struct(displaced);
38625 return retval;
38626
38627+out_fail:
38628+#ifdef CONFIG_GRKERNSEC
38629+ current->acl = old_acl;
38630+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38631+ fput(current->exec_file);
38632+ current->exec_file = old_exec_file;
38633+#endif
38634+
38635 out:
38636 if (bprm->mm) {
38637 acct_arg_size(bprm, 0);
38638@@ -1591,6 +1681,220 @@ out:
38639 return ispipe;
38640 }
38641
38642+int pax_check_flags(unsigned long *flags)
38643+{
38644+ int retval = 0;
38645+
38646+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38647+ if (*flags & MF_PAX_SEGMEXEC)
38648+ {
38649+ *flags &= ~MF_PAX_SEGMEXEC;
38650+ retval = -EINVAL;
38651+ }
38652+#endif
38653+
38654+ if ((*flags & MF_PAX_PAGEEXEC)
38655+
38656+#ifdef CONFIG_PAX_PAGEEXEC
38657+ && (*flags & MF_PAX_SEGMEXEC)
38658+#endif
38659+
38660+ )
38661+ {
38662+ *flags &= ~MF_PAX_PAGEEXEC;
38663+ retval = -EINVAL;
38664+ }
38665+
38666+ if ((*flags & MF_PAX_MPROTECT)
38667+
38668+#ifdef CONFIG_PAX_MPROTECT
38669+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38670+#endif
38671+
38672+ )
38673+ {
38674+ *flags &= ~MF_PAX_MPROTECT;
38675+ retval = -EINVAL;
38676+ }
38677+
38678+ if ((*flags & MF_PAX_EMUTRAMP)
38679+
38680+#ifdef CONFIG_PAX_EMUTRAMP
38681+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38682+#endif
38683+
38684+ )
38685+ {
38686+ *flags &= ~MF_PAX_EMUTRAMP;
38687+ retval = -EINVAL;
38688+ }
38689+
38690+ return retval;
38691+}
38692+
38693+EXPORT_SYMBOL(pax_check_flags);
38694+
38695+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38696+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38697+{
38698+ struct task_struct *tsk = current;
38699+ struct mm_struct *mm = current->mm;
38700+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38701+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38702+ char *path_exec = NULL;
38703+ char *path_fault = NULL;
38704+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
38705+
38706+ if (buffer_exec && buffer_fault) {
38707+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38708+
38709+ down_read(&mm->mmap_sem);
38710+ vma = mm->mmap;
38711+ while (vma && (!vma_exec || !vma_fault)) {
38712+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38713+ vma_exec = vma;
38714+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38715+ vma_fault = vma;
38716+ vma = vma->vm_next;
38717+ }
38718+ if (vma_exec) {
38719+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38720+ if (IS_ERR(path_exec))
38721+ path_exec = "<path too long>";
38722+ else {
38723+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38724+ if (path_exec) {
38725+ *path_exec = 0;
38726+ path_exec = buffer_exec;
38727+ } else
38728+ path_exec = "<path too long>";
38729+ }
38730+ }
38731+ if (vma_fault) {
38732+ start = vma_fault->vm_start;
38733+ end = vma_fault->vm_end;
38734+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38735+ if (vma_fault->vm_file) {
38736+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38737+ if (IS_ERR(path_fault))
38738+ path_fault = "<path too long>";
38739+ else {
38740+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38741+ if (path_fault) {
38742+ *path_fault = 0;
38743+ path_fault = buffer_fault;
38744+ } else
38745+ path_fault = "<path too long>";
38746+ }
38747+ } else
38748+ path_fault = "<anonymous mapping>";
38749+ }
38750+ up_read(&mm->mmap_sem);
38751+ }
38752+ if (tsk->signal->curr_ip)
38753+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38754+ else
38755+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38756+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38757+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38758+ task_uid(tsk), task_euid(tsk), pc, sp);
38759+ free_page((unsigned long)buffer_exec);
38760+ free_page((unsigned long)buffer_fault);
38761+ pax_report_insns(pc, sp);
38762+ do_coredump(SIGKILL, SIGKILL, regs);
38763+}
38764+#endif
38765+
38766+#ifdef CONFIG_PAX_REFCOUNT
38767+void pax_report_refcount_overflow(struct pt_regs *regs)
38768+{
38769+ if (current->signal->curr_ip)
38770+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38771+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38772+ else
38773+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38774+ current->comm, task_pid_nr(current), current_uid(), current_euid());
38775+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38776+ show_regs(regs);
38777+ force_sig_specific(SIGKILL, current);
38778+}
38779+#endif
38780+
38781+#ifdef CONFIG_PAX_USERCOPY
38782+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38783+int object_is_on_stack(const void *obj, unsigned long len)
38784+{
38785+ const void * const stack = task_stack_page(current);
38786+ const void * const stackend = stack + THREAD_SIZE;
38787+
38788+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38789+ const void *frame = NULL;
38790+ const void *oldframe;
38791+#endif
38792+
38793+ if (obj + len < obj)
38794+ return -1;
38795+
38796+ if (obj + len <= stack || stackend <= obj)
38797+ return 0;
38798+
38799+ if (obj < stack || stackend < obj + len)
38800+ return -1;
38801+
38802+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38803+ oldframe = __builtin_frame_address(1);
38804+ if (oldframe)
38805+ frame = __builtin_frame_address(2);
38806+ /*
38807+ low ----------------------------------------------> high
38808+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
38809+ ^----------------^
38810+ allow copies only within here
38811+ */
38812+ while (stack <= frame && frame < stackend) {
38813+ /* if obj + len extends past the last frame, this
38814+ check won't pass and the next frame will be 0,
38815+ causing us to bail out and correctly report
38816+ the copy as invalid
38817+ */
38818+ if (obj + len <= frame)
38819+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38820+ oldframe = frame;
38821+ frame = *(const void * const *)frame;
38822+ }
38823+ return -1;
38824+#else
38825+ return 1;
38826+#endif
38827+}
38828+
38829+
38830+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38831+{
38832+ if (current->signal->curr_ip)
38833+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38834+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38835+ else
38836+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38837+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38838+
38839+ dump_stack();
38840+ gr_handle_kernel_exploit();
38841+ do_group_exit(SIGKILL);
38842+}
38843+#endif
38844+
38845+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38846+void pax_track_stack(void)
38847+{
38848+ unsigned long sp = (unsigned long)&sp;
38849+ if (sp < current_thread_info()->lowest_stack &&
38850+ sp > (unsigned long)task_stack_page(current))
38851+ current_thread_info()->lowest_stack = sp;
38852+}
38853+EXPORT_SYMBOL(pax_track_stack);
38854+#endif
38855+
38856 static int zap_process(struct task_struct *start)
38857 {
38858 struct task_struct *t;
38859@@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38860 pipe = file->f_path.dentry->d_inode->i_pipe;
38861
38862 pipe_lock(pipe);
38863- pipe->readers++;
38864- pipe->writers--;
38865+ atomic_inc(&pipe->readers);
38866+ atomic_dec(&pipe->writers);
38867
38868- while ((pipe->readers > 1) && (!signal_pending(current))) {
38869+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38870 wake_up_interruptible_sync(&pipe->wait);
38871 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38872 pipe_wait(pipe);
38873 }
38874
38875- pipe->readers--;
38876- pipe->writers++;
38877+ atomic_dec(&pipe->readers);
38878+ atomic_inc(&pipe->writers);
38879 pipe_unlock(pipe);
38880
38881 }
38882@@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38883 char **helper_argv = NULL;
38884 int helper_argc = 0;
38885 int dump_count = 0;
38886- static atomic_t core_dump_count = ATOMIC_INIT(0);
38887+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38888
38889 audit_core_dumps(signr);
38890
38891+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38892+ gr_handle_brute_attach(current, mm->flags);
38893+
38894 binfmt = mm->binfmt;
38895 if (!binfmt || !binfmt->core_dump)
38896 goto fail;
38897@@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38898 */
38899 clear_thread_flag(TIF_SIGPENDING);
38900
38901+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38902+
38903 /*
38904 * lock_kernel() because format_corename() is controlled by sysctl, which
38905 * uses lock_kernel()
38906@@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38907 goto fail_unlock;
38908 }
38909
38910- dump_count = atomic_inc_return(&core_dump_count);
38911+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
38912 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38913 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38914 task_tgid_vnr(current), current->comm);
38915@@ -1972,7 +2281,7 @@ close_fail:
38916 filp_close(file, NULL);
38917 fail_dropcount:
38918 if (dump_count)
38919- atomic_dec(&core_dump_count);
38920+ atomic_dec_unchecked(&core_dump_count);
38921 fail_unlock:
38922 if (helper_argv)
38923 argv_free(helper_argv);
38924diff -urNp linux-2.6.32.42/fs/ext2/balloc.c linux-2.6.32.42/fs/ext2/balloc.c
38925--- linux-2.6.32.42/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38926+++ linux-2.6.32.42/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38927@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38928
38929 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38930 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38931- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38932+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38933 sbi->s_resuid != current_fsuid() &&
38934 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38935 return 0;
38936diff -urNp linux-2.6.32.42/fs/ext3/balloc.c linux-2.6.32.42/fs/ext3/balloc.c
38937--- linux-2.6.32.42/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38938+++ linux-2.6.32.42/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38939@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38940
38941 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38942 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38943- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38944+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38945 sbi->s_resuid != current_fsuid() &&
38946 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38947 return 0;
38948diff -urNp linux-2.6.32.42/fs/ext4/balloc.c linux-2.6.32.42/fs/ext4/balloc.c
38949--- linux-2.6.32.42/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38950+++ linux-2.6.32.42/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38951@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38952 /* Hm, nope. Are (enough) root reserved blocks available? */
38953 if (sbi->s_resuid == current_fsuid() ||
38954 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38955- capable(CAP_SYS_RESOURCE)) {
38956+ capable_nolog(CAP_SYS_RESOURCE)) {
38957 if (free_blocks >= (nblocks + dirty_blocks))
38958 return 1;
38959 }
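[Editorial sketch, not part of the patch] The ext2, ext3 and ext4 hunks above all make the same substitution: the reserved-block check calls capable_nolog(), grsecurity's variant of capable() that skips the audit logging, so routine free-space probes do not flood the kernel log. The gate itself is unchanged; the following standalone model restates the decision with plain booleans (names and types here are illustrative, not the ext4 ones).

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the reserved-block gate used by ext2/3/4:
 * ordinary users may not allocate out of the root-reserved pool. */
static bool may_use_reserved(unsigned long free_blocks,
			     unsigned long root_blocks,
			     bool is_resuid, bool in_resgid,
			     bool has_cap_sys_resource)
{
	if (free_blocks >= root_blocks + 1)
		return true;            /* not touching the reserve */
	return is_resuid || in_resgid || has_cap_sys_resource;
}

int main(void)
{
	printf("%d\n", may_use_reserved(10, 100, false, false, false)); /* 0 */
	printf("%d\n", may_use_reserved(10, 100, false, false, true));  /* 1 */
	return 0;
}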
38960diff -urNp linux-2.6.32.42/fs/ext4/ext4.h linux-2.6.32.42/fs/ext4/ext4.h
38961--- linux-2.6.32.42/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38962+++ linux-2.6.32.42/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38963@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38964
38965 /* stats for buddy allocator */
38966 spinlock_t s_mb_pa_lock;
38967- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38968- atomic_t s_bal_success; /* we found long enough chunks */
38969- atomic_t s_bal_allocated; /* in blocks */
38970- atomic_t s_bal_ex_scanned; /* total extents scanned */
38971- atomic_t s_bal_goals; /* goal hits */
38972- atomic_t s_bal_breaks; /* too long searches */
38973- atomic_t s_bal_2orders; /* 2^order hits */
38974+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38975+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38976+ atomic_unchecked_t s_bal_allocated; /* in blocks */
38977+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38978+ atomic_unchecked_t s_bal_goals; /* goal hits */
38979+ atomic_unchecked_t s_bal_breaks; /* too long searches */
38980+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38981 spinlock_t s_bal_lock;
38982 unsigned long s_mb_buddies_generated;
38983 unsigned long long s_mb_generation_time;
38984- atomic_t s_mb_lost_chunks;
38985- atomic_t s_mb_preallocated;
38986- atomic_t s_mb_discarded;
38987+ atomic_unchecked_t s_mb_lost_chunks;
38988+ atomic_unchecked_t s_mb_preallocated;
38989+ atomic_unchecked_t s_mb_discarded;
38990 atomic_t s_lock_busy;
38991
38992 /* locality groups */
38993diff -urNp linux-2.6.32.42/fs/ext4/mballoc.c linux-2.6.32.42/fs/ext4/mballoc.c
38994--- linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
38995+++ linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
38996@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
38997 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38998
38999 if (EXT4_SB(sb)->s_mb_stats)
39000- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39001+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39002
39003 break;
39004 }
39005@@ -2131,7 +2131,7 @@ repeat:
39006 ac->ac_status = AC_STATUS_CONTINUE;
39007 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39008 cr = 3;
39009- atomic_inc(&sbi->s_mb_lost_chunks);
39010+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39011 goto repeat;
39012 }
39013 }
39014@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
39015 ext4_grpblk_t counters[16];
39016 } sg;
39017
39018+ pax_track_stack();
39019+
39020 group--;
39021 if (group == 0)
39022 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39023@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
39024 if (sbi->s_mb_stats) {
39025 printk(KERN_INFO
39026 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39027- atomic_read(&sbi->s_bal_allocated),
39028- atomic_read(&sbi->s_bal_reqs),
39029- atomic_read(&sbi->s_bal_success));
39030+ atomic_read_unchecked(&sbi->s_bal_allocated),
39031+ atomic_read_unchecked(&sbi->s_bal_reqs),
39032+ atomic_read_unchecked(&sbi->s_bal_success));
39033 printk(KERN_INFO
39034 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39035 "%u 2^N hits, %u breaks, %u lost\n",
39036- atomic_read(&sbi->s_bal_ex_scanned),
39037- atomic_read(&sbi->s_bal_goals),
39038- atomic_read(&sbi->s_bal_2orders),
39039- atomic_read(&sbi->s_bal_breaks),
39040- atomic_read(&sbi->s_mb_lost_chunks));
39041+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39042+ atomic_read_unchecked(&sbi->s_bal_goals),
39043+ atomic_read_unchecked(&sbi->s_bal_2orders),
39044+ atomic_read_unchecked(&sbi->s_bal_breaks),
39045+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39046 printk(KERN_INFO
39047 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39048 sbi->s_mb_buddies_generated++,
39049 sbi->s_mb_generation_time);
39050 printk(KERN_INFO
39051 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39052- atomic_read(&sbi->s_mb_preallocated),
39053- atomic_read(&sbi->s_mb_discarded));
39054+ atomic_read_unchecked(&sbi->s_mb_preallocated),
39055+ atomic_read_unchecked(&sbi->s_mb_discarded));
39056 }
39057
39058 free_percpu(sbi->s_locality_groups);
39059@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
39060 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39061
39062 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39063- atomic_inc(&sbi->s_bal_reqs);
39064- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39065+ atomic_inc_unchecked(&sbi->s_bal_reqs);
39066+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39067 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
39068- atomic_inc(&sbi->s_bal_success);
39069- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39070+ atomic_inc_unchecked(&sbi->s_bal_success);
39071+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39072 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39073 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39074- atomic_inc(&sbi->s_bal_goals);
39075+ atomic_inc_unchecked(&sbi->s_bal_goals);
39076 if (ac->ac_found > sbi->s_mb_max_to_scan)
39077- atomic_inc(&sbi->s_bal_breaks);
39078+ atomic_inc_unchecked(&sbi->s_bal_breaks);
39079 }
39080
39081 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39082@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39083 trace_ext4_mb_new_inode_pa(ac, pa);
39084
39085 ext4_mb_use_inode_pa(ac, pa);
39086- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39087+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39088
39089 ei = EXT4_I(ac->ac_inode);
39090 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39091@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39092 trace_ext4_mb_new_group_pa(ac, pa);
39093
39094 ext4_mb_use_group_pa(ac, pa);
39095- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39096+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39097
39098 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39099 lg = ac->ac_lg;
39100@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39101 * from the bitmap and continue.
39102 */
39103 }
39104- atomic_add(free, &sbi->s_mb_discarded);
39105+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
39106
39107 return err;
39108 }
39109@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39110 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39111 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39112 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39113- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39114+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39115
39116 if (ac) {
39117 ac->ac_sb = sb;
39118diff -urNp linux-2.6.32.42/fs/ext4/super.c linux-2.6.32.42/fs/ext4/super.c
39119--- linux-2.6.32.42/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
39120+++ linux-2.6.32.42/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
39121@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
39122 }
39123
39124
39125-static struct sysfs_ops ext4_attr_ops = {
39126+static const struct sysfs_ops ext4_attr_ops = {
39127 .show = ext4_attr_show,
39128 .store = ext4_attr_store,
39129 };
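[Editorial sketch, not part of the patch] The ext4_attr_ops change belongs to a broader constification pass: tables of function pointers that are never modified after initialisation are declared const so they can live in read-only memory, where an attacker cannot overwrite the callbacks. A minimal sketch of the pattern follows; the struct and names are made up for illustration.

#include <stdio.h>

struct demo_ops {
	void (*show)(void);
	void (*store)(void);
};

static void demo_show(void)  { puts("show");  }
static void demo_store(void) { puts("store"); }

/* const: the table goes into .rodata and cannot be rewritten at run time. */
static const struct demo_ops demo_attr_ops = {
	.show  = demo_show,
	.store = demo_store,
};

int main(void)
{
	demo_attr_ops.show();
	demo_attr_ops.store();
	return 0;
}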
39130diff -urNp linux-2.6.32.42/fs/fcntl.c linux-2.6.32.42/fs/fcntl.c
39131--- linux-2.6.32.42/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
39132+++ linux-2.6.32.42/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
39133@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
39134 if (err)
39135 return err;
39136
39137+ if (gr_handle_chroot_fowner(pid, type))
39138+ return -ENOENT;
39139+ if (gr_check_protected_task_fowner(pid, type))
39140+ return -EACCES;
39141+
39142 f_modown(filp, pid, type, force);
39143 return 0;
39144 }
39145@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
39146 switch (cmd) {
39147 case F_DUPFD:
39148 case F_DUPFD_CLOEXEC:
39149+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39150 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39151 break;
39152 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39153diff -urNp linux-2.6.32.42/fs/fifo.c linux-2.6.32.42/fs/fifo.c
39154--- linux-2.6.32.42/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
39155+++ linux-2.6.32.42/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
39156@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
39157 */
39158 filp->f_op = &read_pipefifo_fops;
39159 pipe->r_counter++;
39160- if (pipe->readers++ == 0)
39161+ if (atomic_inc_return(&pipe->readers) == 1)
39162 wake_up_partner(inode);
39163
39164- if (!pipe->writers) {
39165+ if (!atomic_read(&pipe->writers)) {
39166 if ((filp->f_flags & O_NONBLOCK)) {
39167 /* suppress POLLHUP until we have
39168 * seen a writer */
39169@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
39170 * errno=ENXIO when there is no process reading the FIFO.
39171 */
39172 ret = -ENXIO;
39173- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39174+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39175 goto err;
39176
39177 filp->f_op = &write_pipefifo_fops;
39178 pipe->w_counter++;
39179- if (!pipe->writers++)
39180+ if (atomic_inc_return(&pipe->writers) == 1)
39181 wake_up_partner(inode);
39182
39183- if (!pipe->readers) {
39184+ if (!atomic_read(&pipe->readers)) {
39185 wait_for_partner(inode, &pipe->r_counter);
39186 if (signal_pending(current))
39187 goto err_wr;
39188@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
39189 */
39190 filp->f_op = &rdwr_pipefifo_fops;
39191
39192- pipe->readers++;
39193- pipe->writers++;
39194+ atomic_inc(&pipe->readers);
39195+ atomic_inc(&pipe->writers);
39196 pipe->r_counter++;
39197 pipe->w_counter++;
39198- if (pipe->readers == 1 || pipe->writers == 1)
39199+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39200 wake_up_partner(inode);
39201 break;
39202
39203@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
39204 return 0;
39205
39206 err_rd:
39207- if (!--pipe->readers)
39208+ if (atomic_dec_and_test(&pipe->readers))
39209 wake_up_interruptible(&pipe->wait);
39210 ret = -ERESTARTSYS;
39211 goto err;
39212
39213 err_wr:
39214- if (!--pipe->writers)
39215+ if (atomic_dec_and_test(&pipe->writers))
39216 wake_up_interruptible(&pipe->wait);
39217 ret = -ERESTARTSYS;
39218 goto err;
39219
39220 err:
39221- if (!pipe->readers && !pipe->writers)
39222+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39223 free_pipe_info(inode);
39224
39225 err_nocleanup:
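[Editorial sketch, not part of the patch] The fifo.c hunks above, like the wait_for_dump_helpers() hunk earlier, rely on a mechanical equivalence when converting the pipe counters: a post-increment test such as "if (pipe->readers++ == 0)" becomes "if (atomic_inc_return(&pipe->readers) == 1)", and "if (!--pipe->writers)" becomes "if (atomic_dec_and_test(&pipe->writers))", preserving the original control flow while making the updates atomic. The small C11 model below checks both equivalences; plain counters stand in for the kernel's atomic_t API.

#include <assert.h>
#include <stdatomic.h>

/* atomic_fetch_add returns the old value, so old + 1 models the kernel's
 * atomic_inc_return(); old - 1 == 0 models atomic_dec_and_test().        */
int main(void)
{
	int plain = 0;
	atomic_int readers = 0;

	int was_first_plain  = (plain++ == 0);
	int was_first_atomic = (atomic_fetch_add(&readers, 1) + 1 == 1);
	assert(was_first_plain == was_first_atomic);

	int was_last_plain  = (--plain == 0);
	int was_last_atomic = (atomic_fetch_sub(&readers, 1) - 1 == 0);
	assert(was_last_plain == was_last_atomic);
	return 0;
}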
39226diff -urNp linux-2.6.32.42/fs/file.c linux-2.6.32.42/fs/file.c
39227--- linux-2.6.32.42/fs/file.c 2011-03-27 14:31:47.000000000 -0400
39228+++ linux-2.6.32.42/fs/file.c 2011-04-17 15:56:46.000000000 -0400
39229@@ -14,6 +14,7 @@
39230 #include <linux/slab.h>
39231 #include <linux/vmalloc.h>
39232 #include <linux/file.h>
39233+#include <linux/security.h>
39234 #include <linux/fdtable.h>
39235 #include <linux/bitops.h>
39236 #include <linux/interrupt.h>
39237@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
39238 * N.B. For clone tasks sharing a files structure, this test
39239 * will limit the total number of files that can be opened.
39240 */
39241+
39242+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39243 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39244 return -EMFILE;
39245
39246diff -urNp linux-2.6.32.42/fs/filesystems.c linux-2.6.32.42/fs/filesystems.c
39247--- linux-2.6.32.42/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
39248+++ linux-2.6.32.42/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
39249@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
39250 int len = dot ? dot - name : strlen(name);
39251
39252 fs = __get_fs_type(name, len);
39253+
39254+#ifdef CONFIG_GRKERNSEC_MODHARDEN
39255+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39256+#else
39257 if (!fs && (request_module("%.*s", len, name) == 0))
39258+#endif
39259 fs = __get_fs_type(name, len);
39260
39261 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39262diff -urNp linux-2.6.32.42/fs/fscache/cookie.c linux-2.6.32.42/fs/fscache/cookie.c
39263--- linux-2.6.32.42/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
39264+++ linux-2.6.32.42/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
39265@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39266 parent ? (char *) parent->def->name : "<no-parent>",
39267 def->name, netfs_data);
39268
39269- fscache_stat(&fscache_n_acquires);
39270+ fscache_stat_unchecked(&fscache_n_acquires);
39271
39272 /* if there's no parent cookie, then we don't create one here either */
39273 if (!parent) {
39274- fscache_stat(&fscache_n_acquires_null);
39275+ fscache_stat_unchecked(&fscache_n_acquires_null);
39276 _leave(" [no parent]");
39277 return NULL;
39278 }
39279@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39280 /* allocate and initialise a cookie */
39281 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39282 if (!cookie) {
39283- fscache_stat(&fscache_n_acquires_oom);
39284+ fscache_stat_unchecked(&fscache_n_acquires_oom);
39285 _leave(" [ENOMEM]");
39286 return NULL;
39287 }
39288@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39289
39290 switch (cookie->def->type) {
39291 case FSCACHE_COOKIE_TYPE_INDEX:
39292- fscache_stat(&fscache_n_cookie_index);
39293+ fscache_stat_unchecked(&fscache_n_cookie_index);
39294 break;
39295 case FSCACHE_COOKIE_TYPE_DATAFILE:
39296- fscache_stat(&fscache_n_cookie_data);
39297+ fscache_stat_unchecked(&fscache_n_cookie_data);
39298 break;
39299 default:
39300- fscache_stat(&fscache_n_cookie_special);
39301+ fscache_stat_unchecked(&fscache_n_cookie_special);
39302 break;
39303 }
39304
39305@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39306 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39307 atomic_dec(&parent->n_children);
39308 __fscache_cookie_put(cookie);
39309- fscache_stat(&fscache_n_acquires_nobufs);
39310+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39311 _leave(" = NULL");
39312 return NULL;
39313 }
39314 }
39315
39316- fscache_stat(&fscache_n_acquires_ok);
39317+ fscache_stat_unchecked(&fscache_n_acquires_ok);
39318 _leave(" = %p", cookie);
39319 return cookie;
39320 }
39321@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39322 cache = fscache_select_cache_for_object(cookie->parent);
39323 if (!cache) {
39324 up_read(&fscache_addremove_sem);
39325- fscache_stat(&fscache_n_acquires_no_cache);
39326+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39327 _leave(" = -ENOMEDIUM [no cache]");
39328 return -ENOMEDIUM;
39329 }
39330@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39331 object = cache->ops->alloc_object(cache, cookie);
39332 fscache_stat_d(&fscache_n_cop_alloc_object);
39333 if (IS_ERR(object)) {
39334- fscache_stat(&fscache_n_object_no_alloc);
39335+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
39336 ret = PTR_ERR(object);
39337 goto error;
39338 }
39339
39340- fscache_stat(&fscache_n_object_alloc);
39341+ fscache_stat_unchecked(&fscache_n_object_alloc);
39342
39343 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39344
39345@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39346 struct fscache_object *object;
39347 struct hlist_node *_p;
39348
39349- fscache_stat(&fscache_n_updates);
39350+ fscache_stat_unchecked(&fscache_n_updates);
39351
39352 if (!cookie) {
39353- fscache_stat(&fscache_n_updates_null);
39354+ fscache_stat_unchecked(&fscache_n_updates_null);
39355 _leave(" [no cookie]");
39356 return;
39357 }
39358@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39359 struct fscache_object *object;
39360 unsigned long event;
39361
39362- fscache_stat(&fscache_n_relinquishes);
39363+ fscache_stat_unchecked(&fscache_n_relinquishes);
39364 if (retire)
39365- fscache_stat(&fscache_n_relinquishes_retire);
39366+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39367
39368 if (!cookie) {
39369- fscache_stat(&fscache_n_relinquishes_null);
39370+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
39371 _leave(" [no cookie]");
39372 return;
39373 }
39374@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39375
39376 /* wait for the cookie to finish being instantiated (or to fail) */
39377 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39378- fscache_stat(&fscache_n_relinquishes_waitcrt);
39379+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39380 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39381 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39382 }
39383diff -urNp linux-2.6.32.42/fs/fscache/internal.h linux-2.6.32.42/fs/fscache/internal.h
39384--- linux-2.6.32.42/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
39385+++ linux-2.6.32.42/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
39386@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
39387 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39388 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39389
39390-extern atomic_t fscache_n_op_pend;
39391-extern atomic_t fscache_n_op_run;
39392-extern atomic_t fscache_n_op_enqueue;
39393-extern atomic_t fscache_n_op_deferred_release;
39394-extern atomic_t fscache_n_op_release;
39395-extern atomic_t fscache_n_op_gc;
39396-extern atomic_t fscache_n_op_cancelled;
39397-extern atomic_t fscache_n_op_rejected;
39398-
39399-extern atomic_t fscache_n_attr_changed;
39400-extern atomic_t fscache_n_attr_changed_ok;
39401-extern atomic_t fscache_n_attr_changed_nobufs;
39402-extern atomic_t fscache_n_attr_changed_nomem;
39403-extern atomic_t fscache_n_attr_changed_calls;
39404-
39405-extern atomic_t fscache_n_allocs;
39406-extern atomic_t fscache_n_allocs_ok;
39407-extern atomic_t fscache_n_allocs_wait;
39408-extern atomic_t fscache_n_allocs_nobufs;
39409-extern atomic_t fscache_n_allocs_intr;
39410-extern atomic_t fscache_n_allocs_object_dead;
39411-extern atomic_t fscache_n_alloc_ops;
39412-extern atomic_t fscache_n_alloc_op_waits;
39413-
39414-extern atomic_t fscache_n_retrievals;
39415-extern atomic_t fscache_n_retrievals_ok;
39416-extern atomic_t fscache_n_retrievals_wait;
39417-extern atomic_t fscache_n_retrievals_nodata;
39418-extern atomic_t fscache_n_retrievals_nobufs;
39419-extern atomic_t fscache_n_retrievals_intr;
39420-extern atomic_t fscache_n_retrievals_nomem;
39421-extern atomic_t fscache_n_retrievals_object_dead;
39422-extern atomic_t fscache_n_retrieval_ops;
39423-extern atomic_t fscache_n_retrieval_op_waits;
39424-
39425-extern atomic_t fscache_n_stores;
39426-extern atomic_t fscache_n_stores_ok;
39427-extern atomic_t fscache_n_stores_again;
39428-extern atomic_t fscache_n_stores_nobufs;
39429-extern atomic_t fscache_n_stores_oom;
39430-extern atomic_t fscache_n_store_ops;
39431-extern atomic_t fscache_n_store_calls;
39432-extern atomic_t fscache_n_store_pages;
39433-extern atomic_t fscache_n_store_radix_deletes;
39434-extern atomic_t fscache_n_store_pages_over_limit;
39435-
39436-extern atomic_t fscache_n_store_vmscan_not_storing;
39437-extern atomic_t fscache_n_store_vmscan_gone;
39438-extern atomic_t fscache_n_store_vmscan_busy;
39439-extern atomic_t fscache_n_store_vmscan_cancelled;
39440-
39441-extern atomic_t fscache_n_marks;
39442-extern atomic_t fscache_n_uncaches;
39443-
39444-extern atomic_t fscache_n_acquires;
39445-extern atomic_t fscache_n_acquires_null;
39446-extern atomic_t fscache_n_acquires_no_cache;
39447-extern atomic_t fscache_n_acquires_ok;
39448-extern atomic_t fscache_n_acquires_nobufs;
39449-extern atomic_t fscache_n_acquires_oom;
39450-
39451-extern atomic_t fscache_n_updates;
39452-extern atomic_t fscache_n_updates_null;
39453-extern atomic_t fscache_n_updates_run;
39454-
39455-extern atomic_t fscache_n_relinquishes;
39456-extern atomic_t fscache_n_relinquishes_null;
39457-extern atomic_t fscache_n_relinquishes_waitcrt;
39458-extern atomic_t fscache_n_relinquishes_retire;
39459-
39460-extern atomic_t fscache_n_cookie_index;
39461-extern atomic_t fscache_n_cookie_data;
39462-extern atomic_t fscache_n_cookie_special;
39463-
39464-extern atomic_t fscache_n_object_alloc;
39465-extern atomic_t fscache_n_object_no_alloc;
39466-extern atomic_t fscache_n_object_lookups;
39467-extern atomic_t fscache_n_object_lookups_negative;
39468-extern atomic_t fscache_n_object_lookups_positive;
39469-extern atomic_t fscache_n_object_lookups_timed_out;
39470-extern atomic_t fscache_n_object_created;
39471-extern atomic_t fscache_n_object_avail;
39472-extern atomic_t fscache_n_object_dead;
39473-
39474-extern atomic_t fscache_n_checkaux_none;
39475-extern atomic_t fscache_n_checkaux_okay;
39476-extern atomic_t fscache_n_checkaux_update;
39477-extern atomic_t fscache_n_checkaux_obsolete;
39478+extern atomic_unchecked_t fscache_n_op_pend;
39479+extern atomic_unchecked_t fscache_n_op_run;
39480+extern atomic_unchecked_t fscache_n_op_enqueue;
39481+extern atomic_unchecked_t fscache_n_op_deferred_release;
39482+extern atomic_unchecked_t fscache_n_op_release;
39483+extern atomic_unchecked_t fscache_n_op_gc;
39484+extern atomic_unchecked_t fscache_n_op_cancelled;
39485+extern atomic_unchecked_t fscache_n_op_rejected;
39486+
39487+extern atomic_unchecked_t fscache_n_attr_changed;
39488+extern atomic_unchecked_t fscache_n_attr_changed_ok;
39489+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39490+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39491+extern atomic_unchecked_t fscache_n_attr_changed_calls;
39492+
39493+extern atomic_unchecked_t fscache_n_allocs;
39494+extern atomic_unchecked_t fscache_n_allocs_ok;
39495+extern atomic_unchecked_t fscache_n_allocs_wait;
39496+extern atomic_unchecked_t fscache_n_allocs_nobufs;
39497+extern atomic_unchecked_t fscache_n_allocs_intr;
39498+extern atomic_unchecked_t fscache_n_allocs_object_dead;
39499+extern atomic_unchecked_t fscache_n_alloc_ops;
39500+extern atomic_unchecked_t fscache_n_alloc_op_waits;
39501+
39502+extern atomic_unchecked_t fscache_n_retrievals;
39503+extern atomic_unchecked_t fscache_n_retrievals_ok;
39504+extern atomic_unchecked_t fscache_n_retrievals_wait;
39505+extern atomic_unchecked_t fscache_n_retrievals_nodata;
39506+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39507+extern atomic_unchecked_t fscache_n_retrievals_intr;
39508+extern atomic_unchecked_t fscache_n_retrievals_nomem;
39509+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39510+extern atomic_unchecked_t fscache_n_retrieval_ops;
39511+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39512+
39513+extern atomic_unchecked_t fscache_n_stores;
39514+extern atomic_unchecked_t fscache_n_stores_ok;
39515+extern atomic_unchecked_t fscache_n_stores_again;
39516+extern atomic_unchecked_t fscache_n_stores_nobufs;
39517+extern atomic_unchecked_t fscache_n_stores_oom;
39518+extern atomic_unchecked_t fscache_n_store_ops;
39519+extern atomic_unchecked_t fscache_n_store_calls;
39520+extern atomic_unchecked_t fscache_n_store_pages;
39521+extern atomic_unchecked_t fscache_n_store_radix_deletes;
39522+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39523+
39524+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39525+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39526+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39527+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39528+
39529+extern atomic_unchecked_t fscache_n_marks;
39530+extern atomic_unchecked_t fscache_n_uncaches;
39531+
39532+extern atomic_unchecked_t fscache_n_acquires;
39533+extern atomic_unchecked_t fscache_n_acquires_null;
39534+extern atomic_unchecked_t fscache_n_acquires_no_cache;
39535+extern atomic_unchecked_t fscache_n_acquires_ok;
39536+extern atomic_unchecked_t fscache_n_acquires_nobufs;
39537+extern atomic_unchecked_t fscache_n_acquires_oom;
39538+
39539+extern atomic_unchecked_t fscache_n_updates;
39540+extern atomic_unchecked_t fscache_n_updates_null;
39541+extern atomic_unchecked_t fscache_n_updates_run;
39542+
39543+extern atomic_unchecked_t fscache_n_relinquishes;
39544+extern atomic_unchecked_t fscache_n_relinquishes_null;
39545+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39546+extern atomic_unchecked_t fscache_n_relinquishes_retire;
39547+
39548+extern atomic_unchecked_t fscache_n_cookie_index;
39549+extern atomic_unchecked_t fscache_n_cookie_data;
39550+extern atomic_unchecked_t fscache_n_cookie_special;
39551+
39552+extern atomic_unchecked_t fscache_n_object_alloc;
39553+extern atomic_unchecked_t fscache_n_object_no_alloc;
39554+extern atomic_unchecked_t fscache_n_object_lookups;
39555+extern atomic_unchecked_t fscache_n_object_lookups_negative;
39556+extern atomic_unchecked_t fscache_n_object_lookups_positive;
39557+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39558+extern atomic_unchecked_t fscache_n_object_created;
39559+extern atomic_unchecked_t fscache_n_object_avail;
39560+extern atomic_unchecked_t fscache_n_object_dead;
39561+
39562+extern atomic_unchecked_t fscache_n_checkaux_none;
39563+extern atomic_unchecked_t fscache_n_checkaux_okay;
39564+extern atomic_unchecked_t fscache_n_checkaux_update;
39565+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39566
39567 extern atomic_t fscache_n_cop_alloc_object;
39568 extern atomic_t fscache_n_cop_lookup_object;
39569@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
39570 atomic_inc(stat);
39571 }
39572
39573+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39574+{
39575+ atomic_inc_unchecked(stat);
39576+}
39577+
39578 static inline void fscache_stat_d(atomic_t *stat)
39579 {
39580 atomic_dec(stat);
39581@@ -259,6 +264,7 @@ extern const struct file_operations fsca
39582
39583 #define __fscache_stat(stat) (NULL)
39584 #define fscache_stat(stat) do {} while (0)
39585+#define fscache_stat_unchecked(stat) do {} while (0)
39586 #define fscache_stat_d(stat) do {} while (0)
39587 #endif
39588
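[Editorial sketch, not part of the patch] The internal.h hunk above defines fscache_stat_unchecked() in both configurations: a real inline increment when FS-Cache statistics are compiled in, and a do-nothing macro when they are not, so call sites stay unconditional. A condensed standalone model of that dual definition follows; the DEMO_* names are placeholders, not the FS-Cache identifiers.

#include <stdio.h>

#define DEMO_STATS 1    /* flip to 0 to model a CONFIG_FSCACHE_STATS=n build */

typedef struct { volatile long counter; } demo_counter_t;

#if DEMO_STATS
/* Stats enabled: a real increment, standing in for atomic_inc_unchecked(). */
static inline void demo_stat_unchecked(demo_counter_t *stat)
{
	stat->counter++;
}
#else
/* Stats disabled: the call site compiles away entirely. */
#define demo_stat_unchecked(stat) do { } while (0)
#endif

static demo_counter_t demo_n_retrievals;

int main(void)
{
	demo_stat_unchecked(&demo_n_retrievals);   /* unconditional call site */
	printf("retrievals=%ld\n", demo_n_retrievals.counter);
	return 0;
}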
39589diff -urNp linux-2.6.32.42/fs/fscache/object.c linux-2.6.32.42/fs/fscache/object.c
39590--- linux-2.6.32.42/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
39591+++ linux-2.6.32.42/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
39592@@ -144,7 +144,7 @@ static void fscache_object_state_machine
39593 /* update the object metadata on disk */
39594 case FSCACHE_OBJECT_UPDATING:
39595 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39596- fscache_stat(&fscache_n_updates_run);
39597+ fscache_stat_unchecked(&fscache_n_updates_run);
39598 fscache_stat(&fscache_n_cop_update_object);
39599 object->cache->ops->update_object(object);
39600 fscache_stat_d(&fscache_n_cop_update_object);
39601@@ -233,7 +233,7 @@ static void fscache_object_state_machine
39602 spin_lock(&object->lock);
39603 object->state = FSCACHE_OBJECT_DEAD;
39604 spin_unlock(&object->lock);
39605- fscache_stat(&fscache_n_object_dead);
39606+ fscache_stat_unchecked(&fscache_n_object_dead);
39607 goto terminal_transit;
39608
39609 /* handle the parent cache of this object being withdrawn from
39610@@ -248,7 +248,7 @@ static void fscache_object_state_machine
39611 spin_lock(&object->lock);
39612 object->state = FSCACHE_OBJECT_DEAD;
39613 spin_unlock(&object->lock);
39614- fscache_stat(&fscache_n_object_dead);
39615+ fscache_stat_unchecked(&fscache_n_object_dead);
39616 goto terminal_transit;
39617
39618 /* complain about the object being woken up once it is
39619@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39620 parent->cookie->def->name, cookie->def->name,
39621 object->cache->tag->name);
39622
39623- fscache_stat(&fscache_n_object_lookups);
39624+ fscache_stat_unchecked(&fscache_n_object_lookups);
39625 fscache_stat(&fscache_n_cop_lookup_object);
39626 ret = object->cache->ops->lookup_object(object);
39627 fscache_stat_d(&fscache_n_cop_lookup_object);
39628@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39629 if (ret == -ETIMEDOUT) {
39630 /* probably stuck behind another object, so move this one to
39631 * the back of the queue */
39632- fscache_stat(&fscache_n_object_lookups_timed_out);
39633+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39634 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39635 }
39636
39637@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39638
39639 spin_lock(&object->lock);
39640 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39641- fscache_stat(&fscache_n_object_lookups_negative);
39642+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39643
39644 /* transit here to allow write requests to begin stacking up
39645 * and read requests to begin returning ENODATA */
39646@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39647 * result, in which case there may be data available */
39648 spin_lock(&object->lock);
39649 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39650- fscache_stat(&fscache_n_object_lookups_positive);
39651+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39652
39653 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39654
39655@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39656 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39657 } else {
39658 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39659- fscache_stat(&fscache_n_object_created);
39660+ fscache_stat_unchecked(&fscache_n_object_created);
39661
39662 object->state = FSCACHE_OBJECT_AVAILABLE;
39663 spin_unlock(&object->lock);
39664@@ -633,7 +633,7 @@ static void fscache_object_available(str
39665 fscache_enqueue_dependents(object);
39666
39667 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39668- fscache_stat(&fscache_n_object_avail);
39669+ fscache_stat_unchecked(&fscache_n_object_avail);
39670
39671 _leave("");
39672 }
39673@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39674 enum fscache_checkaux result;
39675
39676 if (!object->cookie->def->check_aux) {
39677- fscache_stat(&fscache_n_checkaux_none);
39678+ fscache_stat_unchecked(&fscache_n_checkaux_none);
39679 return FSCACHE_CHECKAUX_OKAY;
39680 }
39681
39682@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39683 switch (result) {
39684 /* entry okay as is */
39685 case FSCACHE_CHECKAUX_OKAY:
39686- fscache_stat(&fscache_n_checkaux_okay);
39687+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
39688 break;
39689
39690 /* entry requires update */
39691 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39692- fscache_stat(&fscache_n_checkaux_update);
39693+ fscache_stat_unchecked(&fscache_n_checkaux_update);
39694 break;
39695
39696 /* entry requires deletion */
39697 case FSCACHE_CHECKAUX_OBSOLETE:
39698- fscache_stat(&fscache_n_checkaux_obsolete);
39699+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39700 break;
39701
39702 default:
39703diff -urNp linux-2.6.32.42/fs/fscache/operation.c linux-2.6.32.42/fs/fscache/operation.c
39704--- linux-2.6.32.42/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39705+++ linux-2.6.32.42/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39706@@ -16,7 +16,7 @@
39707 #include <linux/seq_file.h>
39708 #include "internal.h"
39709
39710-atomic_t fscache_op_debug_id;
39711+atomic_unchecked_t fscache_op_debug_id;
39712 EXPORT_SYMBOL(fscache_op_debug_id);
39713
39714 /**
39715@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39716 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39717 ASSERTCMP(atomic_read(&op->usage), >, 0);
39718
39719- fscache_stat(&fscache_n_op_enqueue);
39720+ fscache_stat_unchecked(&fscache_n_op_enqueue);
39721 switch (op->flags & FSCACHE_OP_TYPE) {
39722 case FSCACHE_OP_FAST:
39723 _debug("queue fast");
39724@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39725 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39726 if (op->processor)
39727 fscache_enqueue_operation(op);
39728- fscache_stat(&fscache_n_op_run);
39729+ fscache_stat_unchecked(&fscache_n_op_run);
39730 }
39731
39732 /*
39733@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39734 if (object->n_ops > 0) {
39735 atomic_inc(&op->usage);
39736 list_add_tail(&op->pend_link, &object->pending_ops);
39737- fscache_stat(&fscache_n_op_pend);
39738+ fscache_stat_unchecked(&fscache_n_op_pend);
39739 } else if (!list_empty(&object->pending_ops)) {
39740 atomic_inc(&op->usage);
39741 list_add_tail(&op->pend_link, &object->pending_ops);
39742- fscache_stat(&fscache_n_op_pend);
39743+ fscache_stat_unchecked(&fscache_n_op_pend);
39744 fscache_start_operations(object);
39745 } else {
39746 ASSERTCMP(object->n_in_progress, ==, 0);
39747@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39748 object->n_exclusive++; /* reads and writes must wait */
39749 atomic_inc(&op->usage);
39750 list_add_tail(&op->pend_link, &object->pending_ops);
39751- fscache_stat(&fscache_n_op_pend);
39752+ fscache_stat_unchecked(&fscache_n_op_pend);
39753 ret = 0;
39754 } else {
39755 /* not allowed to submit ops in any other state */
39756@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39757 if (object->n_exclusive > 0) {
39758 atomic_inc(&op->usage);
39759 list_add_tail(&op->pend_link, &object->pending_ops);
39760- fscache_stat(&fscache_n_op_pend);
39761+ fscache_stat_unchecked(&fscache_n_op_pend);
39762 } else if (!list_empty(&object->pending_ops)) {
39763 atomic_inc(&op->usage);
39764 list_add_tail(&op->pend_link, &object->pending_ops);
39765- fscache_stat(&fscache_n_op_pend);
39766+ fscache_stat_unchecked(&fscache_n_op_pend);
39767 fscache_start_operations(object);
39768 } else {
39769 ASSERTCMP(object->n_exclusive, ==, 0);
39770@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39771 object->n_ops++;
39772 atomic_inc(&op->usage);
39773 list_add_tail(&op->pend_link, &object->pending_ops);
39774- fscache_stat(&fscache_n_op_pend);
39775+ fscache_stat_unchecked(&fscache_n_op_pend);
39776 ret = 0;
39777 } else if (object->state == FSCACHE_OBJECT_DYING ||
39778 object->state == FSCACHE_OBJECT_LC_DYING ||
39779 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39780- fscache_stat(&fscache_n_op_rejected);
39781+ fscache_stat_unchecked(&fscache_n_op_rejected);
39782 ret = -ENOBUFS;
39783 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39784 fscache_report_unexpected_submission(object, op, ostate);
39785@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39786
39787 ret = -EBUSY;
39788 if (!list_empty(&op->pend_link)) {
39789- fscache_stat(&fscache_n_op_cancelled);
39790+ fscache_stat_unchecked(&fscache_n_op_cancelled);
39791 list_del_init(&op->pend_link);
39792 object->n_ops--;
39793 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39794@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39795 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39796 BUG();
39797
39798- fscache_stat(&fscache_n_op_release);
39799+ fscache_stat_unchecked(&fscache_n_op_release);
39800
39801 if (op->release) {
39802 op->release(op);
39803@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39804 * lock, and defer it otherwise */
39805 if (!spin_trylock(&object->lock)) {
39806 _debug("defer put");
39807- fscache_stat(&fscache_n_op_deferred_release);
39808+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
39809
39810 cache = object->cache;
39811 spin_lock(&cache->op_gc_list_lock);
39812@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39813
39814 _debug("GC DEFERRED REL OBJ%x OP%x",
39815 object->debug_id, op->debug_id);
39816- fscache_stat(&fscache_n_op_gc);
39817+ fscache_stat_unchecked(&fscache_n_op_gc);
39818
39819 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39820
39821diff -urNp linux-2.6.32.42/fs/fscache/page.c linux-2.6.32.42/fs/fscache/page.c
39822--- linux-2.6.32.42/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39823+++ linux-2.6.32.42/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39824@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39825 val = radix_tree_lookup(&cookie->stores, page->index);
39826 if (!val) {
39827 rcu_read_unlock();
39828- fscache_stat(&fscache_n_store_vmscan_not_storing);
39829+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39830 __fscache_uncache_page(cookie, page);
39831 return true;
39832 }
39833@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39834 spin_unlock(&cookie->stores_lock);
39835
39836 if (xpage) {
39837- fscache_stat(&fscache_n_store_vmscan_cancelled);
39838- fscache_stat(&fscache_n_store_radix_deletes);
39839+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39840+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39841 ASSERTCMP(xpage, ==, page);
39842 } else {
39843- fscache_stat(&fscache_n_store_vmscan_gone);
39844+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39845 }
39846
39847 wake_up_bit(&cookie->flags, 0);
39848@@ -106,7 +106,7 @@ page_busy:
39849 /* we might want to wait here, but that could deadlock the allocator as
39850 * the slow-work threads writing to the cache may all end up sleeping
39851 * on memory allocation */
39852- fscache_stat(&fscache_n_store_vmscan_busy);
39853+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39854 return false;
39855 }
39856 EXPORT_SYMBOL(__fscache_maybe_release_page);
39857@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39858 FSCACHE_COOKIE_STORING_TAG);
39859 if (!radix_tree_tag_get(&cookie->stores, page->index,
39860 FSCACHE_COOKIE_PENDING_TAG)) {
39861- fscache_stat(&fscache_n_store_radix_deletes);
39862+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39863 xpage = radix_tree_delete(&cookie->stores, page->index);
39864 }
39865 spin_unlock(&cookie->stores_lock);
39866@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39867
39868 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39869
39870- fscache_stat(&fscache_n_attr_changed_calls);
39871+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39872
39873 if (fscache_object_is_active(object)) {
39874 fscache_set_op_state(op, "CallFS");
39875@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39876
39877 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39878
39879- fscache_stat(&fscache_n_attr_changed);
39880+ fscache_stat_unchecked(&fscache_n_attr_changed);
39881
39882 op = kzalloc(sizeof(*op), GFP_KERNEL);
39883 if (!op) {
39884- fscache_stat(&fscache_n_attr_changed_nomem);
39885+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39886 _leave(" = -ENOMEM");
39887 return -ENOMEM;
39888 }
39889@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39890 if (fscache_submit_exclusive_op(object, op) < 0)
39891 goto nobufs;
39892 spin_unlock(&cookie->lock);
39893- fscache_stat(&fscache_n_attr_changed_ok);
39894+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39895 fscache_put_operation(op);
39896 _leave(" = 0");
39897 return 0;
39898@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39899 nobufs:
39900 spin_unlock(&cookie->lock);
39901 kfree(op);
39902- fscache_stat(&fscache_n_attr_changed_nobufs);
39903+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39904 _leave(" = %d", -ENOBUFS);
39905 return -ENOBUFS;
39906 }
39907@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39908 /* allocate a retrieval operation and attempt to submit it */
39909 op = kzalloc(sizeof(*op), GFP_NOIO);
39910 if (!op) {
39911- fscache_stat(&fscache_n_retrievals_nomem);
39912+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39913 return NULL;
39914 }
39915
39916@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39917 return 0;
39918 }
39919
39920- fscache_stat(&fscache_n_retrievals_wait);
39921+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
39922
39923 jif = jiffies;
39924 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39925 fscache_wait_bit_interruptible,
39926 TASK_INTERRUPTIBLE) != 0) {
39927- fscache_stat(&fscache_n_retrievals_intr);
39928+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39929 _leave(" = -ERESTARTSYS");
39930 return -ERESTARTSYS;
39931 }
39932@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39933 */
39934 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39935 struct fscache_retrieval *op,
39936- atomic_t *stat_op_waits,
39937- atomic_t *stat_object_dead)
39938+ atomic_unchecked_t *stat_op_waits,
39939+ atomic_unchecked_t *stat_object_dead)
39940 {
39941 int ret;
39942
39943@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39944 goto check_if_dead;
39945
39946 _debug(">>> WT");
39947- fscache_stat(stat_op_waits);
39948+ fscache_stat_unchecked(stat_op_waits);
39949 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39950 fscache_wait_bit_interruptible,
39951 TASK_INTERRUPTIBLE) < 0) {
39952@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39953
39954 check_if_dead:
39955 if (unlikely(fscache_object_is_dead(object))) {
39956- fscache_stat(stat_object_dead);
39957+ fscache_stat_unchecked(stat_object_dead);
39958 return -ENOBUFS;
39959 }
39960 return 0;
39961@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39962
39963 _enter("%p,%p,,,", cookie, page);
39964
39965- fscache_stat(&fscache_n_retrievals);
39966+ fscache_stat_unchecked(&fscache_n_retrievals);
39967
39968 if (hlist_empty(&cookie->backing_objects))
39969 goto nobufs;
39970@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39971 goto nobufs_unlock;
39972 spin_unlock(&cookie->lock);
39973
39974- fscache_stat(&fscache_n_retrieval_ops);
39975+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
39976
39977 /* pin the netfs read context in case we need to do the actual netfs
39978 * read because we've encountered a cache read failure */
39979@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39980
39981 error:
39982 if (ret == -ENOMEM)
39983- fscache_stat(&fscache_n_retrievals_nomem);
39984+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39985 else if (ret == -ERESTARTSYS)
39986- fscache_stat(&fscache_n_retrievals_intr);
39987+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39988 else if (ret == -ENODATA)
39989- fscache_stat(&fscache_n_retrievals_nodata);
39990+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39991 else if (ret < 0)
39992- fscache_stat(&fscache_n_retrievals_nobufs);
39993+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39994 else
39995- fscache_stat(&fscache_n_retrievals_ok);
39996+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
39997
39998 fscache_put_retrieval(op);
39999 _leave(" = %d", ret);
40000@@ -453,7 +453,7 @@ nobufs_unlock:
40001 spin_unlock(&cookie->lock);
40002 kfree(op);
40003 nobufs:
40004- fscache_stat(&fscache_n_retrievals_nobufs);
40005+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40006 _leave(" = -ENOBUFS");
40007 return -ENOBUFS;
40008 }
40009@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
40010
40011 _enter("%p,,%d,,,", cookie, *nr_pages);
40012
40013- fscache_stat(&fscache_n_retrievals);
40014+ fscache_stat_unchecked(&fscache_n_retrievals);
40015
40016 if (hlist_empty(&cookie->backing_objects))
40017 goto nobufs;
40018@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
40019 goto nobufs_unlock;
40020 spin_unlock(&cookie->lock);
40021
40022- fscache_stat(&fscache_n_retrieval_ops);
40023+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40024
40025 /* pin the netfs read context in case we need to do the actual netfs
40026 * read because we've encountered a cache read failure */
40027@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
40028
40029 error:
40030 if (ret == -ENOMEM)
40031- fscache_stat(&fscache_n_retrievals_nomem);
40032+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40033 else if (ret == -ERESTARTSYS)
40034- fscache_stat(&fscache_n_retrievals_intr);
40035+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40036 else if (ret == -ENODATA)
40037- fscache_stat(&fscache_n_retrievals_nodata);
40038+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40039 else if (ret < 0)
40040- fscache_stat(&fscache_n_retrievals_nobufs);
40041+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40042 else
40043- fscache_stat(&fscache_n_retrievals_ok);
40044+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40045
40046 fscache_put_retrieval(op);
40047 _leave(" = %d", ret);
40048@@ -570,7 +570,7 @@ nobufs_unlock:
40049 spin_unlock(&cookie->lock);
40050 kfree(op);
40051 nobufs:
40052- fscache_stat(&fscache_n_retrievals_nobufs);
40053+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40054 _leave(" = -ENOBUFS");
40055 return -ENOBUFS;
40056 }
40057@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
40058
40059 _enter("%p,%p,,,", cookie, page);
40060
40061- fscache_stat(&fscache_n_allocs);
40062+ fscache_stat_unchecked(&fscache_n_allocs);
40063
40064 if (hlist_empty(&cookie->backing_objects))
40065 goto nobufs;
40066@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
40067 goto nobufs_unlock;
40068 spin_unlock(&cookie->lock);
40069
40070- fscache_stat(&fscache_n_alloc_ops);
40071+ fscache_stat_unchecked(&fscache_n_alloc_ops);
40072
40073 ret = fscache_wait_for_retrieval_activation(
40074 object, op,
40075@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
40076
40077 error:
40078 if (ret == -ERESTARTSYS)
40079- fscache_stat(&fscache_n_allocs_intr);
40080+ fscache_stat_unchecked(&fscache_n_allocs_intr);
40081 else if (ret < 0)
40082- fscache_stat(&fscache_n_allocs_nobufs);
40083+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40084 else
40085- fscache_stat(&fscache_n_allocs_ok);
40086+ fscache_stat_unchecked(&fscache_n_allocs_ok);
40087
40088 fscache_put_retrieval(op);
40089 _leave(" = %d", ret);
40090@@ -651,7 +651,7 @@ nobufs_unlock:
40091 spin_unlock(&cookie->lock);
40092 kfree(op);
40093 nobufs:
40094- fscache_stat(&fscache_n_allocs_nobufs);
40095+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40096 _leave(" = -ENOBUFS");
40097 return -ENOBUFS;
40098 }
40099@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
40100
40101 spin_lock(&cookie->stores_lock);
40102
40103- fscache_stat(&fscache_n_store_calls);
40104+ fscache_stat_unchecked(&fscache_n_store_calls);
40105
40106 /* find a page to store */
40107 page = NULL;
40108@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
40109 page = results[0];
40110 _debug("gang %d [%lx]", n, page->index);
40111 if (page->index > op->store_limit) {
40112- fscache_stat(&fscache_n_store_pages_over_limit);
40113+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40114 goto superseded;
40115 }
40116
40117@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
40118
40119 if (page) {
40120 fscache_set_op_state(&op->op, "Store");
40121- fscache_stat(&fscache_n_store_pages);
40122+ fscache_stat_unchecked(&fscache_n_store_pages);
40123 fscache_stat(&fscache_n_cop_write_page);
40124 ret = object->cache->ops->write_page(op, page);
40125 fscache_stat_d(&fscache_n_cop_write_page);
40126@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
40127 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40128 ASSERT(PageFsCache(page));
40129
40130- fscache_stat(&fscache_n_stores);
40131+ fscache_stat_unchecked(&fscache_n_stores);
40132
40133 op = kzalloc(sizeof(*op), GFP_NOIO);
40134 if (!op)
40135@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
40136 spin_unlock(&cookie->stores_lock);
40137 spin_unlock(&object->lock);
40138
40139- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40140+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40141 op->store_limit = object->store_limit;
40142
40143 if (fscache_submit_op(object, &op->op) < 0)
40144@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
40145
40146 spin_unlock(&cookie->lock);
40147 radix_tree_preload_end();
40148- fscache_stat(&fscache_n_store_ops);
40149- fscache_stat(&fscache_n_stores_ok);
40150+ fscache_stat_unchecked(&fscache_n_store_ops);
40151+ fscache_stat_unchecked(&fscache_n_stores_ok);
40152
40153 /* the slow work queue now carries its own ref on the object */
40154 fscache_put_operation(&op->op);
40155@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
40156 return 0;
40157
40158 already_queued:
40159- fscache_stat(&fscache_n_stores_again);
40160+ fscache_stat_unchecked(&fscache_n_stores_again);
40161 already_pending:
40162 spin_unlock(&cookie->stores_lock);
40163 spin_unlock(&object->lock);
40164 spin_unlock(&cookie->lock);
40165 radix_tree_preload_end();
40166 kfree(op);
40167- fscache_stat(&fscache_n_stores_ok);
40168+ fscache_stat_unchecked(&fscache_n_stores_ok);
40169 _leave(" = 0");
40170 return 0;
40171
40172@@ -886,14 +886,14 @@ nobufs:
40173 spin_unlock(&cookie->lock);
40174 radix_tree_preload_end();
40175 kfree(op);
40176- fscache_stat(&fscache_n_stores_nobufs);
40177+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
40178 _leave(" = -ENOBUFS");
40179 return -ENOBUFS;
40180
40181 nomem_free:
40182 kfree(op);
40183 nomem:
40184- fscache_stat(&fscache_n_stores_oom);
40185+ fscache_stat_unchecked(&fscache_n_stores_oom);
40186 _leave(" = -ENOMEM");
40187 return -ENOMEM;
40188 }
40189@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
40190 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40191 ASSERTCMP(page, !=, NULL);
40192
40193- fscache_stat(&fscache_n_uncaches);
40194+ fscache_stat_unchecked(&fscache_n_uncaches);
40195
40196 /* cache withdrawal may beat us to it */
40197 if (!PageFsCache(page))
40198@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
40199 unsigned long loop;
40200
40201 #ifdef CONFIG_FSCACHE_STATS
40202- atomic_add(pagevec->nr, &fscache_n_marks);
40203+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40204 #endif
40205
40206 for (loop = 0; loop < pagevec->nr; loop++) {
40207diff -urNp linux-2.6.32.42/fs/fscache/stats.c linux-2.6.32.42/fs/fscache/stats.c
40208--- linux-2.6.32.42/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
40209+++ linux-2.6.32.42/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
40210@@ -18,95 +18,95 @@
40211 /*
40212 * operation counters
40213 */
40214-atomic_t fscache_n_op_pend;
40215-atomic_t fscache_n_op_run;
40216-atomic_t fscache_n_op_enqueue;
40217-atomic_t fscache_n_op_requeue;
40218-atomic_t fscache_n_op_deferred_release;
40219-atomic_t fscache_n_op_release;
40220-atomic_t fscache_n_op_gc;
40221-atomic_t fscache_n_op_cancelled;
40222-atomic_t fscache_n_op_rejected;
40223-
40224-atomic_t fscache_n_attr_changed;
40225-atomic_t fscache_n_attr_changed_ok;
40226-atomic_t fscache_n_attr_changed_nobufs;
40227-atomic_t fscache_n_attr_changed_nomem;
40228-atomic_t fscache_n_attr_changed_calls;
40229-
40230-atomic_t fscache_n_allocs;
40231-atomic_t fscache_n_allocs_ok;
40232-atomic_t fscache_n_allocs_wait;
40233-atomic_t fscache_n_allocs_nobufs;
40234-atomic_t fscache_n_allocs_intr;
40235-atomic_t fscache_n_allocs_object_dead;
40236-atomic_t fscache_n_alloc_ops;
40237-atomic_t fscache_n_alloc_op_waits;
40238-
40239-atomic_t fscache_n_retrievals;
40240-atomic_t fscache_n_retrievals_ok;
40241-atomic_t fscache_n_retrievals_wait;
40242-atomic_t fscache_n_retrievals_nodata;
40243-atomic_t fscache_n_retrievals_nobufs;
40244-atomic_t fscache_n_retrievals_intr;
40245-atomic_t fscache_n_retrievals_nomem;
40246-atomic_t fscache_n_retrievals_object_dead;
40247-atomic_t fscache_n_retrieval_ops;
40248-atomic_t fscache_n_retrieval_op_waits;
40249-
40250-atomic_t fscache_n_stores;
40251-atomic_t fscache_n_stores_ok;
40252-atomic_t fscache_n_stores_again;
40253-atomic_t fscache_n_stores_nobufs;
40254-atomic_t fscache_n_stores_oom;
40255-atomic_t fscache_n_store_ops;
40256-atomic_t fscache_n_store_calls;
40257-atomic_t fscache_n_store_pages;
40258-atomic_t fscache_n_store_radix_deletes;
40259-atomic_t fscache_n_store_pages_over_limit;
40260-
40261-atomic_t fscache_n_store_vmscan_not_storing;
40262-atomic_t fscache_n_store_vmscan_gone;
40263-atomic_t fscache_n_store_vmscan_busy;
40264-atomic_t fscache_n_store_vmscan_cancelled;
40265-
40266-atomic_t fscache_n_marks;
40267-atomic_t fscache_n_uncaches;
40268-
40269-atomic_t fscache_n_acquires;
40270-atomic_t fscache_n_acquires_null;
40271-atomic_t fscache_n_acquires_no_cache;
40272-atomic_t fscache_n_acquires_ok;
40273-atomic_t fscache_n_acquires_nobufs;
40274-atomic_t fscache_n_acquires_oom;
40275-
40276-atomic_t fscache_n_updates;
40277-atomic_t fscache_n_updates_null;
40278-atomic_t fscache_n_updates_run;
40279-
40280-atomic_t fscache_n_relinquishes;
40281-atomic_t fscache_n_relinquishes_null;
40282-atomic_t fscache_n_relinquishes_waitcrt;
40283-atomic_t fscache_n_relinquishes_retire;
40284-
40285-atomic_t fscache_n_cookie_index;
40286-atomic_t fscache_n_cookie_data;
40287-atomic_t fscache_n_cookie_special;
40288-
40289-atomic_t fscache_n_object_alloc;
40290-atomic_t fscache_n_object_no_alloc;
40291-atomic_t fscache_n_object_lookups;
40292-atomic_t fscache_n_object_lookups_negative;
40293-atomic_t fscache_n_object_lookups_positive;
40294-atomic_t fscache_n_object_lookups_timed_out;
40295-atomic_t fscache_n_object_created;
40296-atomic_t fscache_n_object_avail;
40297-atomic_t fscache_n_object_dead;
40298-
40299-atomic_t fscache_n_checkaux_none;
40300-atomic_t fscache_n_checkaux_okay;
40301-atomic_t fscache_n_checkaux_update;
40302-atomic_t fscache_n_checkaux_obsolete;
40303+atomic_unchecked_t fscache_n_op_pend;
40304+atomic_unchecked_t fscache_n_op_run;
40305+atomic_unchecked_t fscache_n_op_enqueue;
40306+atomic_unchecked_t fscache_n_op_requeue;
40307+atomic_unchecked_t fscache_n_op_deferred_release;
40308+atomic_unchecked_t fscache_n_op_release;
40309+atomic_unchecked_t fscache_n_op_gc;
40310+atomic_unchecked_t fscache_n_op_cancelled;
40311+atomic_unchecked_t fscache_n_op_rejected;
40312+
40313+atomic_unchecked_t fscache_n_attr_changed;
40314+atomic_unchecked_t fscache_n_attr_changed_ok;
40315+atomic_unchecked_t fscache_n_attr_changed_nobufs;
40316+atomic_unchecked_t fscache_n_attr_changed_nomem;
40317+atomic_unchecked_t fscache_n_attr_changed_calls;
40318+
40319+atomic_unchecked_t fscache_n_allocs;
40320+atomic_unchecked_t fscache_n_allocs_ok;
40321+atomic_unchecked_t fscache_n_allocs_wait;
40322+atomic_unchecked_t fscache_n_allocs_nobufs;
40323+atomic_unchecked_t fscache_n_allocs_intr;
40324+atomic_unchecked_t fscache_n_allocs_object_dead;
40325+atomic_unchecked_t fscache_n_alloc_ops;
40326+atomic_unchecked_t fscache_n_alloc_op_waits;
40327+
40328+atomic_unchecked_t fscache_n_retrievals;
40329+atomic_unchecked_t fscache_n_retrievals_ok;
40330+atomic_unchecked_t fscache_n_retrievals_wait;
40331+atomic_unchecked_t fscache_n_retrievals_nodata;
40332+atomic_unchecked_t fscache_n_retrievals_nobufs;
40333+atomic_unchecked_t fscache_n_retrievals_intr;
40334+atomic_unchecked_t fscache_n_retrievals_nomem;
40335+atomic_unchecked_t fscache_n_retrievals_object_dead;
40336+atomic_unchecked_t fscache_n_retrieval_ops;
40337+atomic_unchecked_t fscache_n_retrieval_op_waits;
40338+
40339+atomic_unchecked_t fscache_n_stores;
40340+atomic_unchecked_t fscache_n_stores_ok;
40341+atomic_unchecked_t fscache_n_stores_again;
40342+atomic_unchecked_t fscache_n_stores_nobufs;
40343+atomic_unchecked_t fscache_n_stores_oom;
40344+atomic_unchecked_t fscache_n_store_ops;
40345+atomic_unchecked_t fscache_n_store_calls;
40346+atomic_unchecked_t fscache_n_store_pages;
40347+atomic_unchecked_t fscache_n_store_radix_deletes;
40348+atomic_unchecked_t fscache_n_store_pages_over_limit;
40349+
40350+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40351+atomic_unchecked_t fscache_n_store_vmscan_gone;
40352+atomic_unchecked_t fscache_n_store_vmscan_busy;
40353+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40354+
40355+atomic_unchecked_t fscache_n_marks;
40356+atomic_unchecked_t fscache_n_uncaches;
40357+
40358+atomic_unchecked_t fscache_n_acquires;
40359+atomic_unchecked_t fscache_n_acquires_null;
40360+atomic_unchecked_t fscache_n_acquires_no_cache;
40361+atomic_unchecked_t fscache_n_acquires_ok;
40362+atomic_unchecked_t fscache_n_acquires_nobufs;
40363+atomic_unchecked_t fscache_n_acquires_oom;
40364+
40365+atomic_unchecked_t fscache_n_updates;
40366+atomic_unchecked_t fscache_n_updates_null;
40367+atomic_unchecked_t fscache_n_updates_run;
40368+
40369+atomic_unchecked_t fscache_n_relinquishes;
40370+atomic_unchecked_t fscache_n_relinquishes_null;
40371+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40372+atomic_unchecked_t fscache_n_relinquishes_retire;
40373+
40374+atomic_unchecked_t fscache_n_cookie_index;
40375+atomic_unchecked_t fscache_n_cookie_data;
40376+atomic_unchecked_t fscache_n_cookie_special;
40377+
40378+atomic_unchecked_t fscache_n_object_alloc;
40379+atomic_unchecked_t fscache_n_object_no_alloc;
40380+atomic_unchecked_t fscache_n_object_lookups;
40381+atomic_unchecked_t fscache_n_object_lookups_negative;
40382+atomic_unchecked_t fscache_n_object_lookups_positive;
40383+atomic_unchecked_t fscache_n_object_lookups_timed_out;
40384+atomic_unchecked_t fscache_n_object_created;
40385+atomic_unchecked_t fscache_n_object_avail;
40386+atomic_unchecked_t fscache_n_object_dead;
40387+
40388+atomic_unchecked_t fscache_n_checkaux_none;
40389+atomic_unchecked_t fscache_n_checkaux_okay;
40390+atomic_unchecked_t fscache_n_checkaux_update;
40391+atomic_unchecked_t fscache_n_checkaux_obsolete;
40392
40393 atomic_t fscache_n_cop_alloc_object;
40394 atomic_t fscache_n_cop_lookup_object;
40395@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40396 seq_puts(m, "FS-Cache statistics\n");
40397
40398 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40399- atomic_read(&fscache_n_cookie_index),
40400- atomic_read(&fscache_n_cookie_data),
40401- atomic_read(&fscache_n_cookie_special));
40402+ atomic_read_unchecked(&fscache_n_cookie_index),
40403+ atomic_read_unchecked(&fscache_n_cookie_data),
40404+ atomic_read_unchecked(&fscache_n_cookie_special));
40405
40406 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40407- atomic_read(&fscache_n_object_alloc),
40408- atomic_read(&fscache_n_object_no_alloc),
40409- atomic_read(&fscache_n_object_avail),
40410- atomic_read(&fscache_n_object_dead));
40411+ atomic_read_unchecked(&fscache_n_object_alloc),
40412+ atomic_read_unchecked(&fscache_n_object_no_alloc),
40413+ atomic_read_unchecked(&fscache_n_object_avail),
40414+ atomic_read_unchecked(&fscache_n_object_dead));
40415 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40416- atomic_read(&fscache_n_checkaux_none),
40417- atomic_read(&fscache_n_checkaux_okay),
40418- atomic_read(&fscache_n_checkaux_update),
40419- atomic_read(&fscache_n_checkaux_obsolete));
40420+ atomic_read_unchecked(&fscache_n_checkaux_none),
40421+ atomic_read_unchecked(&fscache_n_checkaux_okay),
40422+ atomic_read_unchecked(&fscache_n_checkaux_update),
40423+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40424
40425 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40426- atomic_read(&fscache_n_marks),
40427- atomic_read(&fscache_n_uncaches));
40428+ atomic_read_unchecked(&fscache_n_marks),
40429+ atomic_read_unchecked(&fscache_n_uncaches));
40430
40431 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40432 " oom=%u\n",
40433- atomic_read(&fscache_n_acquires),
40434- atomic_read(&fscache_n_acquires_null),
40435- atomic_read(&fscache_n_acquires_no_cache),
40436- atomic_read(&fscache_n_acquires_ok),
40437- atomic_read(&fscache_n_acquires_nobufs),
40438- atomic_read(&fscache_n_acquires_oom));
40439+ atomic_read_unchecked(&fscache_n_acquires),
40440+ atomic_read_unchecked(&fscache_n_acquires_null),
40441+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
40442+ atomic_read_unchecked(&fscache_n_acquires_ok),
40443+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
40444+ atomic_read_unchecked(&fscache_n_acquires_oom));
40445
40446 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40447- atomic_read(&fscache_n_object_lookups),
40448- atomic_read(&fscache_n_object_lookups_negative),
40449- atomic_read(&fscache_n_object_lookups_positive),
40450- atomic_read(&fscache_n_object_lookups_timed_out),
40451- atomic_read(&fscache_n_object_created));
40452+ atomic_read_unchecked(&fscache_n_object_lookups),
40453+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
40454+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
40455+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
40456+ atomic_read_unchecked(&fscache_n_object_created));
40457
40458 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40459- atomic_read(&fscache_n_updates),
40460- atomic_read(&fscache_n_updates_null),
40461- atomic_read(&fscache_n_updates_run));
40462+ atomic_read_unchecked(&fscache_n_updates),
40463+ atomic_read_unchecked(&fscache_n_updates_null),
40464+ atomic_read_unchecked(&fscache_n_updates_run));
40465
40466 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40467- atomic_read(&fscache_n_relinquishes),
40468- atomic_read(&fscache_n_relinquishes_null),
40469- atomic_read(&fscache_n_relinquishes_waitcrt),
40470- atomic_read(&fscache_n_relinquishes_retire));
40471+ atomic_read_unchecked(&fscache_n_relinquishes),
40472+ atomic_read_unchecked(&fscache_n_relinquishes_null),
40473+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40474+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
40475
40476 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40477- atomic_read(&fscache_n_attr_changed),
40478- atomic_read(&fscache_n_attr_changed_ok),
40479- atomic_read(&fscache_n_attr_changed_nobufs),
40480- atomic_read(&fscache_n_attr_changed_nomem),
40481- atomic_read(&fscache_n_attr_changed_calls));
40482+ atomic_read_unchecked(&fscache_n_attr_changed),
40483+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
40484+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40485+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40486+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
40487
40488 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40489- atomic_read(&fscache_n_allocs),
40490- atomic_read(&fscache_n_allocs_ok),
40491- atomic_read(&fscache_n_allocs_wait),
40492- atomic_read(&fscache_n_allocs_nobufs),
40493- atomic_read(&fscache_n_allocs_intr));
40494+ atomic_read_unchecked(&fscache_n_allocs),
40495+ atomic_read_unchecked(&fscache_n_allocs_ok),
40496+ atomic_read_unchecked(&fscache_n_allocs_wait),
40497+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
40498+ atomic_read_unchecked(&fscache_n_allocs_intr));
40499 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40500- atomic_read(&fscache_n_alloc_ops),
40501- atomic_read(&fscache_n_alloc_op_waits),
40502- atomic_read(&fscache_n_allocs_object_dead));
40503+ atomic_read_unchecked(&fscache_n_alloc_ops),
40504+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
40505+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
40506
40507 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40508 " int=%u oom=%u\n",
40509- atomic_read(&fscache_n_retrievals),
40510- atomic_read(&fscache_n_retrievals_ok),
40511- atomic_read(&fscache_n_retrievals_wait),
40512- atomic_read(&fscache_n_retrievals_nodata),
40513- atomic_read(&fscache_n_retrievals_nobufs),
40514- atomic_read(&fscache_n_retrievals_intr),
40515- atomic_read(&fscache_n_retrievals_nomem));
40516+ atomic_read_unchecked(&fscache_n_retrievals),
40517+ atomic_read_unchecked(&fscache_n_retrievals_ok),
40518+ atomic_read_unchecked(&fscache_n_retrievals_wait),
40519+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
40520+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40521+ atomic_read_unchecked(&fscache_n_retrievals_intr),
40522+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
40523 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40524- atomic_read(&fscache_n_retrieval_ops),
40525- atomic_read(&fscache_n_retrieval_op_waits),
40526- atomic_read(&fscache_n_retrievals_object_dead));
40527+ atomic_read_unchecked(&fscache_n_retrieval_ops),
40528+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40529+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40530
40531 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40532- atomic_read(&fscache_n_stores),
40533- atomic_read(&fscache_n_stores_ok),
40534- atomic_read(&fscache_n_stores_again),
40535- atomic_read(&fscache_n_stores_nobufs),
40536- atomic_read(&fscache_n_stores_oom));
40537+ atomic_read_unchecked(&fscache_n_stores),
40538+ atomic_read_unchecked(&fscache_n_stores_ok),
40539+ atomic_read_unchecked(&fscache_n_stores_again),
40540+ atomic_read_unchecked(&fscache_n_stores_nobufs),
40541+ atomic_read_unchecked(&fscache_n_stores_oom));
40542 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40543- atomic_read(&fscache_n_store_ops),
40544- atomic_read(&fscache_n_store_calls),
40545- atomic_read(&fscache_n_store_pages),
40546- atomic_read(&fscache_n_store_radix_deletes),
40547- atomic_read(&fscache_n_store_pages_over_limit));
40548+ atomic_read_unchecked(&fscache_n_store_ops),
40549+ atomic_read_unchecked(&fscache_n_store_calls),
40550+ atomic_read_unchecked(&fscache_n_store_pages),
40551+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
40552+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40553
40554 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40555- atomic_read(&fscache_n_store_vmscan_not_storing),
40556- atomic_read(&fscache_n_store_vmscan_gone),
40557- atomic_read(&fscache_n_store_vmscan_busy),
40558- atomic_read(&fscache_n_store_vmscan_cancelled));
40559+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40560+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40561+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40562+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40563
40564 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40565- atomic_read(&fscache_n_op_pend),
40566- atomic_read(&fscache_n_op_run),
40567- atomic_read(&fscache_n_op_enqueue),
40568- atomic_read(&fscache_n_op_cancelled),
40569- atomic_read(&fscache_n_op_rejected));
40570+ atomic_read_unchecked(&fscache_n_op_pend),
40571+ atomic_read_unchecked(&fscache_n_op_run),
40572+ atomic_read_unchecked(&fscache_n_op_enqueue),
40573+ atomic_read_unchecked(&fscache_n_op_cancelled),
40574+ atomic_read_unchecked(&fscache_n_op_rejected));
40575 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40576- atomic_read(&fscache_n_op_deferred_release),
40577- atomic_read(&fscache_n_op_release),
40578- atomic_read(&fscache_n_op_gc));
40579+ atomic_read_unchecked(&fscache_n_op_deferred_release),
40580+ atomic_read_unchecked(&fscache_n_op_release),
40581+ atomic_read_unchecked(&fscache_n_op_gc));
40582
40583 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40584 atomic_read(&fscache_n_cop_alloc_object),
40585diff -urNp linux-2.6.32.42/fs/fs_struct.c linux-2.6.32.42/fs/fs_struct.c
40586--- linux-2.6.32.42/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
40587+++ linux-2.6.32.42/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
40588@@ -4,6 +4,7 @@
40589 #include <linux/path.h>
40590 #include <linux/slab.h>
40591 #include <linux/fs_struct.h>
40592+#include <linux/grsecurity.h>
40593
40594 /*
40595 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
40596@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
40597 old_root = fs->root;
40598 fs->root = *path;
40599 path_get(path);
40600+ gr_set_chroot_entries(current, path);
40601 write_unlock(&fs->lock);
40602 if (old_root.dentry)
40603 path_put(&old_root);
40604@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
40605 && fs->root.mnt == old_root->mnt) {
40606 path_get(new_root);
40607 fs->root = *new_root;
40608+ gr_set_chroot_entries(p, new_root);
40609 count++;
40610 }
40611 if (fs->pwd.dentry == old_root->dentry
40612@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
40613 task_lock(tsk);
40614 write_lock(&fs->lock);
40615 tsk->fs = NULL;
40616- kill = !--fs->users;
40617+ gr_clear_chroot_entries(tsk);
40618+ kill = !atomic_dec_return(&fs->users);
40619 write_unlock(&fs->lock);
40620 task_unlock(tsk);
40621 if (kill)
40622@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40623 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40624 /* We don't need to lock fs - think why ;-) */
40625 if (fs) {
40626- fs->users = 1;
40627+ atomic_set(&fs->users, 1);
40628 fs->in_exec = 0;
40629 rwlock_init(&fs->lock);
40630 fs->umask = old->umask;
40631@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40632
40633 task_lock(current);
40634 write_lock(&fs->lock);
40635- kill = !--fs->users;
40636+ kill = !atomic_dec_return(&fs->users);
40637 current->fs = new_fs;
40638+ gr_set_chroot_entries(current, &new_fs->root);
40639 write_unlock(&fs->lock);
40640 task_unlock(current);
40641
40642@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40643
40644 /* to be mentioned only in INIT_TASK */
40645 struct fs_struct init_fs = {
40646- .users = 1,
40647+ .users = ATOMIC_INIT(1),
40648 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40649 .umask = 0022,
40650 };
40651@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40652 task_lock(current);
40653
40654 write_lock(&init_fs.lock);
40655- init_fs.users++;
40656+ atomic_inc(&init_fs.users);
40657 write_unlock(&init_fs.lock);
40658
40659 write_lock(&fs->lock);
40660 current->fs = &init_fs;
40661- kill = !--fs->users;
40662+ gr_set_chroot_entries(current, &current->fs->root);
40663+ kill = !atomic_dec_return(&fs->users);
40664 write_unlock(&fs->lock);
40665
40666 task_unlock(current);
40667diff -urNp linux-2.6.32.42/fs/fuse/cuse.c linux-2.6.32.42/fs/fuse/cuse.c
40668--- linux-2.6.32.42/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40669+++ linux-2.6.32.42/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40670@@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40671 return rc;
40672 }
40673
40674-static struct file_operations cuse_channel_fops; /* initialized during init */
40675-
40676+static const struct file_operations cuse_channel_fops = { /* initialized during init */
40677+ .owner = THIS_MODULE,
40678+ .llseek = no_llseek,
40679+ .read = do_sync_read,
40680+ .aio_read = fuse_dev_read,
40681+ .write = do_sync_write,
40682+ .aio_write = fuse_dev_write,
40683+ .poll = fuse_dev_poll,
40684+ .open = cuse_channel_open,
40685+ .release = cuse_channel_release,
40686+ .fasync = fuse_dev_fasync,
40687+};
40688
40689 /**************************************************************************
40690 * Misc stuff and module initializatiion
40691@@ -575,12 +585,6 @@ static int __init cuse_init(void)
40692 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40693 INIT_LIST_HEAD(&cuse_conntbl[i]);
40694
40695- /* inherit and extend fuse_dev_operations */
40696- cuse_channel_fops = fuse_dev_operations;
40697- cuse_channel_fops.owner = THIS_MODULE;
40698- cuse_channel_fops.open = cuse_channel_open;
40699- cuse_channel_fops.release = cuse_channel_release;
40700-
40701 cuse_class = class_create(THIS_MODULE, "cuse");
40702 if (IS_ERR(cuse_class))
40703 return PTR_ERR(cuse_class);
40704diff -urNp linux-2.6.32.42/fs/fuse/dev.c linux-2.6.32.42/fs/fuse/dev.c
40705--- linux-2.6.32.42/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40706+++ linux-2.6.32.42/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40707@@ -745,7 +745,7 @@ __releases(&fc->lock)
40708 * request_end(). Otherwise add it to the processing list, and set
40709 * the 'sent' flag.
40710 */
40711-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40712+ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40713 unsigned long nr_segs, loff_t pos)
40714 {
40715 int err;
40716@@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40717 spin_unlock(&fc->lock);
40718 return err;
40719 }
40720+EXPORT_SYMBOL_GPL(fuse_dev_read);
40721
40722 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40723 struct fuse_copy_state *cs)
40724@@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40725 {
40726 struct fuse_notify_inval_entry_out outarg;
40727 int err = -EINVAL;
40728- char buf[FUSE_NAME_MAX+1];
40729+ char *buf = NULL;
40730 struct qstr name;
40731
40732 if (size < sizeof(outarg))
40733@@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40734 if (outarg.namelen > FUSE_NAME_MAX)
40735 goto err;
40736
40737+ err = -ENOMEM;
40738+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40739+ if (!buf)
40740+ goto err;
40741+
40742 name.name = buf;
40743 name.len = outarg.namelen;
40744 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40745@@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40746
40747 down_read(&fc->killsb);
40748 err = -ENOENT;
40749- if (!fc->sb)
40750- goto err_unlock;
40751-
40752- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40753-
40754-err_unlock:
40755+ if (fc->sb)
40756+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40757 up_read(&fc->killsb);
40758+ kfree(buf);
40759 return err;
40760
40761 err:
40762 fuse_copy_finish(cs);
40763+ kfree(buf);
40764 return err;
40765 }
40766
40767@@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40768 * it from the list and copy the rest of the buffer to the request.
40769 * The request is finished by calling request_end()
40770 */
40771-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40772+ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40773 unsigned long nr_segs, loff_t pos)
40774 {
40775 int err;
40776@@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40777 fuse_copy_finish(&cs);
40778 return err;
40779 }
40780+EXPORT_SYMBOL_GPL(fuse_dev_write);
40781
40782-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40783+unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40784 {
40785 unsigned mask = POLLOUT | POLLWRNORM;
40786 struct fuse_conn *fc = fuse_get_conn(file);
40787@@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40788
40789 return mask;
40790 }
40791+EXPORT_SYMBOL_GPL(fuse_dev_poll);
40792
40793 /*
40794 * Abort all requests on the given list (pending or processing)
40795@@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40796 }
40797 EXPORT_SYMBOL_GPL(fuse_dev_release);
40798
40799-static int fuse_dev_fasync(int fd, struct file *file, int on)
40800+int fuse_dev_fasync(int fd, struct file *file, int on)
40801 {
40802 struct fuse_conn *fc = fuse_get_conn(file);
40803 if (!fc)
40804@@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40805 /* No locking - fasync_helper does its own locking */
40806 return fasync_helper(fd, file, on, &fc->fasync);
40807 }
40808+EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40809
40810 const struct file_operations fuse_dev_operations = {
40811 .owner = THIS_MODULE,
40812diff -urNp linux-2.6.32.42/fs/fuse/dir.c linux-2.6.32.42/fs/fuse/dir.c
40813--- linux-2.6.32.42/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40814+++ linux-2.6.32.42/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40815@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40816 return link;
40817 }
40818
40819-static void free_link(char *link)
40820+static void free_link(const char *link)
40821 {
40822 if (!IS_ERR(link))
40823 free_page((unsigned long) link);
40824diff -urNp linux-2.6.32.42/fs/fuse/fuse_i.h linux-2.6.32.42/fs/fuse/fuse_i.h
40825--- linux-2.6.32.42/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40826+++ linux-2.6.32.42/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40827@@ -525,6 +525,16 @@ extern const struct file_operations fuse
40828
40829 extern const struct dentry_operations fuse_dentry_operations;
40830
40831+extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40832+ unsigned long nr_segs, loff_t pos);
40833+
40834+extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40835+ unsigned long nr_segs, loff_t pos);
40836+
40837+extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40838+
40839+extern int fuse_dev_fasync(int fd, struct file *file, int on);
40840+
40841 /**
40842 * Inode to nodeid comparison.
40843 */
40844diff -urNp linux-2.6.32.42/fs/gfs2/ops_inode.c linux-2.6.32.42/fs/gfs2/ops_inode.c
40845--- linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40846+++ linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40847@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40848 unsigned int x;
40849 int error;
40850
40851+ pax_track_stack();
40852+
40853 if (ndentry->d_inode) {
40854 nip = GFS2_I(ndentry->d_inode);
40855 if (ip == nip)
40856diff -urNp linux-2.6.32.42/fs/gfs2/sys.c linux-2.6.32.42/fs/gfs2/sys.c
40857--- linux-2.6.32.42/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40858+++ linux-2.6.32.42/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40859@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40860 return a->store ? a->store(sdp, buf, len) : len;
40861 }
40862
40863-static struct sysfs_ops gfs2_attr_ops = {
40864+static const struct sysfs_ops gfs2_attr_ops = {
40865 .show = gfs2_attr_show,
40866 .store = gfs2_attr_store,
40867 };
40868@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40869 return 0;
40870 }
40871
40872-static struct kset_uevent_ops gfs2_uevent_ops = {
40873+static const struct kset_uevent_ops gfs2_uevent_ops = {
40874 .uevent = gfs2_uevent,
40875 };
40876
40877diff -urNp linux-2.6.32.42/fs/hfsplus/catalog.c linux-2.6.32.42/fs/hfsplus/catalog.c
40878--- linux-2.6.32.42/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40879+++ linux-2.6.32.42/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40880@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40881 int err;
40882 u16 type;
40883
40884+ pax_track_stack();
40885+
40886 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40887 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40888 if (err)
40889@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40890 int entry_size;
40891 int err;
40892
40893+ pax_track_stack();
40894+
40895 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40896 sb = dir->i_sb;
40897 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40898@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40899 int entry_size, type;
40900 int err = 0;
40901
40902+ pax_track_stack();
40903+
40904 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40905 dst_dir->i_ino, dst_name->name);
40906 sb = src_dir->i_sb;
40907diff -urNp linux-2.6.32.42/fs/hfsplus/dir.c linux-2.6.32.42/fs/hfsplus/dir.c
40908--- linux-2.6.32.42/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40909+++ linux-2.6.32.42/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40910@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40911 struct hfsplus_readdir_data *rd;
40912 u16 type;
40913
40914+ pax_track_stack();
40915+
40916 if (filp->f_pos >= inode->i_size)
40917 return 0;
40918
40919diff -urNp linux-2.6.32.42/fs/hfsplus/inode.c linux-2.6.32.42/fs/hfsplus/inode.c
40920--- linux-2.6.32.42/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40921+++ linux-2.6.32.42/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40922@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40923 int res = 0;
40924 u16 type;
40925
40926+ pax_track_stack();
40927+
40928 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40929
40930 HFSPLUS_I(inode).dev = 0;
40931@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40932 struct hfs_find_data fd;
40933 hfsplus_cat_entry entry;
40934
40935+ pax_track_stack();
40936+
40937 if (HFSPLUS_IS_RSRC(inode))
40938 main_inode = HFSPLUS_I(inode).rsrc_inode;
40939
40940diff -urNp linux-2.6.32.42/fs/hfsplus/ioctl.c linux-2.6.32.42/fs/hfsplus/ioctl.c
40941--- linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40942+++ linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40943@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40944 struct hfsplus_cat_file *file;
40945 int res;
40946
40947+ pax_track_stack();
40948+
40949 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40950 return -EOPNOTSUPP;
40951
40952@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40953 struct hfsplus_cat_file *file;
40954 ssize_t res = 0;
40955
40956+ pax_track_stack();
40957+
40958 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40959 return -EOPNOTSUPP;
40960
40961diff -urNp linux-2.6.32.42/fs/hfsplus/super.c linux-2.6.32.42/fs/hfsplus/super.c
40962--- linux-2.6.32.42/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40963+++ linux-2.6.32.42/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40964@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40965 struct nls_table *nls = NULL;
40966 int err = -EINVAL;
40967
40968+ pax_track_stack();
40969+
40970 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40971 if (!sbi)
40972 return -ENOMEM;
40973diff -urNp linux-2.6.32.42/fs/hugetlbfs/inode.c linux-2.6.32.42/fs/hugetlbfs/inode.c
40974--- linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40975+++ linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40976@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40977 .kill_sb = kill_litter_super,
40978 };
40979
40980-static struct vfsmount *hugetlbfs_vfsmount;
40981+struct vfsmount *hugetlbfs_vfsmount;
40982
40983 static int can_do_hugetlb_shm(void)
40984 {
40985diff -urNp linux-2.6.32.42/fs/ioctl.c linux-2.6.32.42/fs/ioctl.c
40986--- linux-2.6.32.42/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40987+++ linux-2.6.32.42/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40988@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40989 u64 phys, u64 len, u32 flags)
40990 {
40991 struct fiemap_extent extent;
40992- struct fiemap_extent *dest = fieinfo->fi_extents_start;
40993+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40994
40995 /* only count the extents */
40996 if (fieinfo->fi_extents_max == 0) {
40997@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40998
40999 fieinfo.fi_flags = fiemap.fm_flags;
41000 fieinfo.fi_extents_max = fiemap.fm_extent_count;
41001- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
41002+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
41003
41004 if (fiemap.fm_extent_count != 0 &&
41005 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
41006@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
41007 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
41008 fiemap.fm_flags = fieinfo.fi_flags;
41009 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
41010- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
41011+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
41012 error = -EFAULT;
41013
41014 return error;
41015diff -urNp linux-2.6.32.42/fs/jbd/checkpoint.c linux-2.6.32.42/fs/jbd/checkpoint.c
41016--- linux-2.6.32.42/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
41017+++ linux-2.6.32.42/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
41018@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
41019 tid_t this_tid;
41020 int result;
41021
41022+ pax_track_stack();
41023+
41024 jbd_debug(1, "Start checkpoint\n");
41025
41026 /*
41027diff -urNp linux-2.6.32.42/fs/jffs2/compr_rtime.c linux-2.6.32.42/fs/jffs2/compr_rtime.c
41028--- linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
41029+++ linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
41030@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
41031 int outpos = 0;
41032 int pos=0;
41033
41034+ pax_track_stack();
41035+
41036 memset(positions,0,sizeof(positions));
41037
41038 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
41039@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
41040 int outpos = 0;
41041 int pos=0;
41042
41043+ pax_track_stack();
41044+
41045 memset(positions,0,sizeof(positions));
41046
41047 while (outpos<destlen) {
41048diff -urNp linux-2.6.32.42/fs/jffs2/compr_rubin.c linux-2.6.32.42/fs/jffs2/compr_rubin.c
41049--- linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
41050+++ linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
41051@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
41052 int ret;
41053 uint32_t mysrclen, mydstlen;
41054
41055+ pax_track_stack();
41056+
41057 mysrclen = *sourcelen;
41058 mydstlen = *dstlen - 8;
41059
41060diff -urNp linux-2.6.32.42/fs/jffs2/erase.c linux-2.6.32.42/fs/jffs2/erase.c
41061--- linux-2.6.32.42/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
41062+++ linux-2.6.32.42/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
41063@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
41064 struct jffs2_unknown_node marker = {
41065 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
41066 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41067- .totlen = cpu_to_je32(c->cleanmarker_size)
41068+ .totlen = cpu_to_je32(c->cleanmarker_size),
41069+ .hdr_crc = cpu_to_je32(0)
41070 };
41071
41072 jffs2_prealloc_raw_node_refs(c, jeb, 1);
41073diff -urNp linux-2.6.32.42/fs/jffs2/wbuf.c linux-2.6.32.42/fs/jffs2/wbuf.c
41074--- linux-2.6.32.42/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
41075+++ linux-2.6.32.42/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
41076@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
41077 {
41078 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
41079 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41080- .totlen = constant_cpu_to_je32(8)
41081+ .totlen = constant_cpu_to_je32(8),
41082+ .hdr_crc = constant_cpu_to_je32(0)
41083 };
41084
41085 /*
41086diff -urNp linux-2.6.32.42/fs/jffs2/xattr.c linux-2.6.32.42/fs/jffs2/xattr.c
41087--- linux-2.6.32.42/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
41088+++ linux-2.6.32.42/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
41089@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
41090
41091 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
41092
41093+ pax_track_stack();
41094+
41095 /* Phase.1 : Merge same xref */
41096 for (i=0; i < XREF_TMPHASH_SIZE; i++)
41097 xref_tmphash[i] = NULL;
41098diff -urNp linux-2.6.32.42/fs/jfs/super.c linux-2.6.32.42/fs/jfs/super.c
41099--- linux-2.6.32.42/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
41100+++ linux-2.6.32.42/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
41101@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
41102
41103 jfs_inode_cachep =
41104 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
41105- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
41106+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
41107 init_once);
41108 if (jfs_inode_cachep == NULL)
41109 return -ENOMEM;
41110diff -urNp linux-2.6.32.42/fs/Kconfig.binfmt linux-2.6.32.42/fs/Kconfig.binfmt
41111--- linux-2.6.32.42/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
41112+++ linux-2.6.32.42/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
41113@@ -86,7 +86,7 @@ config HAVE_AOUT
41114
41115 config BINFMT_AOUT
41116 tristate "Kernel support for a.out and ECOFF binaries"
41117- depends on HAVE_AOUT
41118+ depends on HAVE_AOUT && BROKEN
41119 ---help---
41120 A.out (Assembler.OUTput) is a set of formats for libraries and
41121 executables used in the earliest versions of UNIX. Linux used
41122diff -urNp linux-2.6.32.42/fs/libfs.c linux-2.6.32.42/fs/libfs.c
41123--- linux-2.6.32.42/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
41124+++ linux-2.6.32.42/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
41125@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
41126
41127 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41128 struct dentry *next;
41129+ char d_name[sizeof(next->d_iname)];
41130+ const unsigned char *name;
41131+
41132 next = list_entry(p, struct dentry, d_u.d_child);
41133 if (d_unhashed(next) || !next->d_inode)
41134 continue;
41135
41136 spin_unlock(&dcache_lock);
41137- if (filldir(dirent, next->d_name.name,
41138+ name = next->d_name.name;
41139+ if (name == next->d_iname) {
41140+ memcpy(d_name, name, next->d_name.len);
41141+ name = d_name;
41142+ }
41143+ if (filldir(dirent, name,
41144 next->d_name.len, filp->f_pos,
41145 next->d_inode->i_ino,
41146 dt_type(next->d_inode)) < 0)
41147diff -urNp linux-2.6.32.42/fs/lockd/clntproc.c linux-2.6.32.42/fs/lockd/clntproc.c
41148--- linux-2.6.32.42/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
41149+++ linux-2.6.32.42/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
41150@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41151 /*
41152 * Cookie counter for NLM requests
41153 */
41154-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41155+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41156
41157 void nlmclnt_next_cookie(struct nlm_cookie *c)
41158 {
41159- u32 cookie = atomic_inc_return(&nlm_cookie);
41160+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41161
41162 memcpy(c->data, &cookie, 4);
41163 c->len=4;
41164@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41165 struct nlm_rqst reqst, *req;
41166 int status;
41167
41168+ pax_track_stack();
41169+
41170 req = &reqst;
41171 memset(req, 0, sizeof(*req));
41172 locks_init_lock(&req->a_args.lock.fl);
41173diff -urNp linux-2.6.32.42/fs/lockd/svc.c linux-2.6.32.42/fs/lockd/svc.c
41174--- linux-2.6.32.42/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
41175+++ linux-2.6.32.42/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
41176@@ -43,7 +43,7 @@
41177
41178 static struct svc_program nlmsvc_program;
41179
41180-struct nlmsvc_binding * nlmsvc_ops;
41181+const struct nlmsvc_binding * nlmsvc_ops;
41182 EXPORT_SYMBOL_GPL(nlmsvc_ops);
41183
41184 static DEFINE_MUTEX(nlmsvc_mutex);
41185diff -urNp linux-2.6.32.42/fs/locks.c linux-2.6.32.42/fs/locks.c
41186--- linux-2.6.32.42/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
41187+++ linux-2.6.32.42/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
41188@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
41189
41190 static struct kmem_cache *filelock_cache __read_mostly;
41191
41192+static void locks_init_lock_always(struct file_lock *fl)
41193+{
41194+ fl->fl_next = NULL;
41195+ fl->fl_fasync = NULL;
41196+ fl->fl_owner = NULL;
41197+ fl->fl_pid = 0;
41198+ fl->fl_nspid = NULL;
41199+ fl->fl_file = NULL;
41200+ fl->fl_flags = 0;
41201+ fl->fl_type = 0;
41202+ fl->fl_start = fl->fl_end = 0;
41203+}
41204+
41205 /* Allocate an empty lock structure. */
41206 static struct file_lock *locks_alloc_lock(void)
41207 {
41208- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41209+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41210+
41211+ if (fl)
41212+ locks_init_lock_always(fl);
41213+
41214+ return fl;
41215 }
41216
41217 void locks_release_private(struct file_lock *fl)
41218@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
41219 INIT_LIST_HEAD(&fl->fl_link);
41220 INIT_LIST_HEAD(&fl->fl_block);
41221 init_waitqueue_head(&fl->fl_wait);
41222- fl->fl_next = NULL;
41223- fl->fl_fasync = NULL;
41224- fl->fl_owner = NULL;
41225- fl->fl_pid = 0;
41226- fl->fl_nspid = NULL;
41227- fl->fl_file = NULL;
41228- fl->fl_flags = 0;
41229- fl->fl_type = 0;
41230- fl->fl_start = fl->fl_end = 0;
41231 fl->fl_ops = NULL;
41232 fl->fl_lmops = NULL;
41233+ locks_init_lock_always(fl);
41234 }
41235
41236 EXPORT_SYMBOL(locks_init_lock);
41237@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
41238 return;
41239
41240 if (filp->f_op && filp->f_op->flock) {
41241- struct file_lock fl = {
41242+ struct file_lock flock = {
41243 .fl_pid = current->tgid,
41244 .fl_file = filp,
41245 .fl_flags = FL_FLOCK,
41246 .fl_type = F_UNLCK,
41247 .fl_end = OFFSET_MAX,
41248 };
41249- filp->f_op->flock(filp, F_SETLKW, &fl);
41250- if (fl.fl_ops && fl.fl_ops->fl_release_private)
41251- fl.fl_ops->fl_release_private(&fl);
41252+ filp->f_op->flock(filp, F_SETLKW, &flock);
41253+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
41254+ flock.fl_ops->fl_release_private(&flock);
41255 }
41256
41257 lock_kernel();
41258diff -urNp linux-2.6.32.42/fs/namei.c linux-2.6.32.42/fs/namei.c
41259--- linux-2.6.32.42/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
41260+++ linux-2.6.32.42/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
41261@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
41262 return ret;
41263
41264 /*
41265- * Read/write DACs are always overridable.
41266- * Executable DACs are overridable if at least one exec bit is set.
41267- */
41268- if (!(mask & MAY_EXEC) || execute_ok(inode))
41269- if (capable(CAP_DAC_OVERRIDE))
41270- return 0;
41271-
41272- /*
41273 * Searching includes executable on directories, else just read.
41274 */
41275 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41276@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
41277 if (capable(CAP_DAC_READ_SEARCH))
41278 return 0;
41279
41280+ /*
41281+ * Read/write DACs are always overridable.
41282+ * Executable DACs are overridable if at least one exec bit is set.
41283+ */
41284+ if (!(mask & MAY_EXEC) || execute_ok(inode))
41285+ if (capable(CAP_DAC_OVERRIDE))
41286+ return 0;
41287+
41288 return -EACCES;
41289 }
41290
41291@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
41292 if (!ret)
41293 goto ok;
41294
41295- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
41296+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
41297+ capable(CAP_DAC_OVERRIDE))
41298 goto ok;
41299
41300 return ret;
41301@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
41302 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
41303 error = PTR_ERR(cookie);
41304 if (!IS_ERR(cookie)) {
41305- char *s = nd_get_link(nd);
41306+ const char *s = nd_get_link(nd);
41307 error = 0;
41308 if (s)
41309 error = __vfs_follow_link(nd, s);
41310@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
41311 err = security_inode_follow_link(path->dentry, nd);
41312 if (err)
41313 goto loop;
41314+
41315+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
41316+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
41317+ err = -EACCES;
41318+ goto loop;
41319+ }
41320+
41321 current->link_count++;
41322 current->total_link_count++;
41323 nd->depth++;
41324@@ -1016,11 +1024,18 @@ return_reval:
41325 break;
41326 }
41327 return_base:
41328+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
41329+ path_put(&nd->path);
41330+ return -ENOENT;
41331+ }
41332 return 0;
41333 out_dput:
41334 path_put_conditional(&next, nd);
41335 break;
41336 }
41337+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41338+ err = -ENOENT;
41339+
41340 path_put(&nd->path);
41341 return_err:
41342 return err;
41343@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
41344 int retval = path_init(dfd, name, flags, nd);
41345 if (!retval)
41346 retval = path_walk(name, nd);
41347- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
41348- nd->path.dentry->d_inode))
41349- audit_inode(name, nd->path.dentry);
41350+
41351+ if (likely(!retval)) {
41352+ if (nd->path.dentry && nd->path.dentry->d_inode) {
41353+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41354+ retval = -ENOENT;
41355+ if (!audit_dummy_context())
41356+ audit_inode(name, nd->path.dentry);
41357+ }
41358+ }
41359 if (nd->root.mnt) {
41360 path_put(&nd->root);
41361 nd->root.mnt = NULL;
41362 }
41363+
41364 return retval;
41365 }
41366
41367@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
41368 if (error)
41369 goto err_out;
41370
41371+
41372+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41373+ error = -EPERM;
41374+ goto err_out;
41375+ }
41376+ if (gr_handle_rawio(inode)) {
41377+ error = -EPERM;
41378+ goto err_out;
41379+ }
41380+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
41381+ error = -EACCES;
41382+ goto err_out;
41383+ }
41384+
41385 if (flag & O_TRUNC) {
41386 error = get_write_access(inode);
41387 if (error)
41388@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
41389 int error;
41390 struct dentry *dir = nd->path.dentry;
41391
41392+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
41393+ error = -EACCES;
41394+ goto out_unlock;
41395+ }
41396+
41397 if (!IS_POSIXACL(dir->d_inode))
41398 mode &= ~current_umask();
41399 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
41400 if (error)
41401 goto out_unlock;
41402 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
41403+ if (!error)
41404+ gr_handle_create(path->dentry, nd->path.mnt);
41405 out_unlock:
41406 mutex_unlock(&dir->d_inode->i_mutex);
41407 dput(nd->path.dentry);
41408@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
41409 &nd, flag);
41410 if (error)
41411 return ERR_PTR(error);
41412+
41413+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
41414+ error = -EPERM;
41415+ goto exit;
41416+ }
41417+
41418+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
41419+ error = -EPERM;
41420+ goto exit;
41421+ }
41422+
41423+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
41424+ error = -EACCES;
41425+ goto exit;
41426+ }
41427+
41428 goto ok;
41429 }
41430
41431@@ -1795,6 +1854,14 @@ do_last:
41432 /*
41433 * It already exists.
41434 */
41435+
41436+ /* only check if O_CREAT is specified, all other checks need
41437+ to go into may_open */
41438+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
41439+ error = -EACCES;
41440+ goto exit_mutex_unlock;
41441+ }
41442+
41443 mutex_unlock(&dir->d_inode->i_mutex);
41444 audit_inode(pathname, path.dentry);
41445
41446@@ -1887,6 +1954,13 @@ do_link:
41447 error = security_inode_follow_link(path.dentry, &nd);
41448 if (error)
41449 goto exit_dput;
41450+
41451+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
41452+ path.dentry, nd.path.mnt)) {
41453+ error = -EACCES;
41454+ goto exit_dput;
41455+ }
41456+
41457 error = __do_follow_link(&path, &nd);
41458 if (error) {
41459 /* Does someone understand code flow here? Or it is only
41460@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41461 error = may_mknod(mode);
41462 if (error)
41463 goto out_dput;
41464+
41465+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41466+ error = -EPERM;
41467+ goto out_dput;
41468+ }
41469+
41470+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41471+ error = -EACCES;
41472+ goto out_dput;
41473+ }
41474+
41475 error = mnt_want_write(nd.path.mnt);
41476 if (error)
41477 goto out_dput;
41478@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41479 }
41480 out_drop_write:
41481 mnt_drop_write(nd.path.mnt);
41482+
41483+ if (!error)
41484+ gr_handle_create(dentry, nd.path.mnt);
41485 out_dput:
41486 dput(dentry);
41487 out_unlock:
41488@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41489 if (IS_ERR(dentry))
41490 goto out_unlock;
41491
41492+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41493+ error = -EACCES;
41494+ goto out_dput;
41495+ }
41496+
41497 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41498 mode &= ~current_umask();
41499 error = mnt_want_write(nd.path.mnt);
41500@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41501 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41502 out_drop_write:
41503 mnt_drop_write(nd.path.mnt);
41504+
41505+ if (!error)
41506+ gr_handle_create(dentry, nd.path.mnt);
41507+
41508 out_dput:
41509 dput(dentry);
41510 out_unlock:
41511@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
41512 char * name;
41513 struct dentry *dentry;
41514 struct nameidata nd;
41515+ ino_t saved_ino = 0;
41516+ dev_t saved_dev = 0;
41517
41518 error = user_path_parent(dfd, pathname, &nd, &name);
41519 if (error)
41520@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
41521 error = PTR_ERR(dentry);
41522 if (IS_ERR(dentry))
41523 goto exit2;
41524+
41525+ if (dentry->d_inode != NULL) {
41526+ if (dentry->d_inode->i_nlink <= 1) {
41527+ saved_ino = dentry->d_inode->i_ino;
41528+ saved_dev = gr_get_dev_from_dentry(dentry);
41529+ }
41530+
41531+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41532+ error = -EACCES;
41533+ goto exit3;
41534+ }
41535+ }
41536+
41537 error = mnt_want_write(nd.path.mnt);
41538 if (error)
41539 goto exit3;
41540@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
41541 if (error)
41542 goto exit4;
41543 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41544+ if (!error && (saved_dev || saved_ino))
41545+ gr_handle_delete(saved_ino, saved_dev);
41546 exit4:
41547 mnt_drop_write(nd.path.mnt);
41548 exit3:
41549@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
41550 struct dentry *dentry;
41551 struct nameidata nd;
41552 struct inode *inode = NULL;
41553+ ino_t saved_ino = 0;
41554+ dev_t saved_dev = 0;
41555
41556 error = user_path_parent(dfd, pathname, &nd, &name);
41557 if (error)
41558@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
41559 if (nd.last.name[nd.last.len])
41560 goto slashes;
41561 inode = dentry->d_inode;
41562- if (inode)
41563+ if (inode) {
41564+ if (inode->i_nlink <= 1) {
41565+ saved_ino = inode->i_ino;
41566+ saved_dev = gr_get_dev_from_dentry(dentry);
41567+ }
41568+
41569 atomic_inc(&inode->i_count);
41570+
41571+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41572+ error = -EACCES;
41573+ goto exit2;
41574+ }
41575+ }
41576 error = mnt_want_write(nd.path.mnt);
41577 if (error)
41578 goto exit2;
41579@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
41580 if (error)
41581 goto exit3;
41582 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41583+ if (!error && (saved_ino || saved_dev))
41584+ gr_handle_delete(saved_ino, saved_dev);
41585 exit3:
41586 mnt_drop_write(nd.path.mnt);
41587 exit2:
41588@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41589 if (IS_ERR(dentry))
41590 goto out_unlock;
41591
41592+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41593+ error = -EACCES;
41594+ goto out_dput;
41595+ }
41596+
41597 error = mnt_want_write(nd.path.mnt);
41598 if (error)
41599 goto out_dput;
41600@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41601 if (error)
41602 goto out_drop_write;
41603 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41604+ if (!error)
41605+ gr_handle_create(dentry, nd.path.mnt);
41606 out_drop_write:
41607 mnt_drop_write(nd.path.mnt);
41608 out_dput:
41609@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41610 error = PTR_ERR(new_dentry);
41611 if (IS_ERR(new_dentry))
41612 goto out_unlock;
41613+
41614+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41615+ old_path.dentry->d_inode,
41616+ old_path.dentry->d_inode->i_mode, to)) {
41617+ error = -EACCES;
41618+ goto out_dput;
41619+ }
41620+
41621+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41622+ old_path.dentry, old_path.mnt, to)) {
41623+ error = -EACCES;
41624+ goto out_dput;
41625+ }
41626+
41627 error = mnt_want_write(nd.path.mnt);
41628 if (error)
41629 goto out_dput;
41630@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41631 if (error)
41632 goto out_drop_write;
41633 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41634+ if (!error)
41635+ gr_handle_create(new_dentry, nd.path.mnt);
41636 out_drop_write:
41637 mnt_drop_write(nd.path.mnt);
41638 out_dput:
41639@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41640 char *to;
41641 int error;
41642
41643+ pax_track_stack();
41644+
41645 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41646 if (error)
41647 goto exit;
41648@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41649 if (new_dentry == trap)
41650 goto exit5;
41651
41652+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41653+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
41654+ to);
41655+ if (error)
41656+ goto exit5;
41657+
41658 error = mnt_want_write(oldnd.path.mnt);
41659 if (error)
41660 goto exit5;
41661@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41662 goto exit6;
41663 error = vfs_rename(old_dir->d_inode, old_dentry,
41664 new_dir->d_inode, new_dentry);
41665+ if (!error)
41666+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41667+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41668 exit6:
41669 mnt_drop_write(oldnd.path.mnt);
41670 exit5:
41671@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41672
41673 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41674 {
41675+ char tmpbuf[64];
41676+ const char *newlink;
41677 int len;
41678
41679 len = PTR_ERR(link);
41680@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41681 len = strlen(link);
41682 if (len > (unsigned) buflen)
41683 len = buflen;
41684- if (copy_to_user(buffer, link, len))
41685+
41686+ if (len < sizeof(tmpbuf)) {
41687+ memcpy(tmpbuf, link, len);
41688+ newlink = tmpbuf;
41689+ } else
41690+ newlink = link;
41691+
41692+ if (copy_to_user(buffer, newlink, len))
41693 len = -EFAULT;
41694 out:
41695 return len;
41696diff -urNp linux-2.6.32.42/fs/namespace.c linux-2.6.32.42/fs/namespace.c
41697--- linux-2.6.32.42/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41698+++ linux-2.6.32.42/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41699@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41700 if (!(sb->s_flags & MS_RDONLY))
41701 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41702 up_write(&sb->s_umount);
41703+
41704+ gr_log_remount(mnt->mnt_devname, retval);
41705+
41706 return retval;
41707 }
41708
41709@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41710 security_sb_umount_busy(mnt);
41711 up_write(&namespace_sem);
41712 release_mounts(&umount_list);
41713+
41714+ gr_log_unmount(mnt->mnt_devname, retval);
41715+
41716 return retval;
41717 }
41718
41719@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41720 if (retval)
41721 goto dput_out;
41722
41723+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41724+ retval = -EPERM;
41725+ goto dput_out;
41726+ }
41727+
41728+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41729+ retval = -EPERM;
41730+ goto dput_out;
41731+ }
41732+
41733 if (flags & MS_REMOUNT)
41734 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41735 data_page);
41736@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41737 dev_name, data_page);
41738 dput_out:
41739 path_put(&path);
41740+
41741+ gr_log_mount(dev_name, dir_name, retval);
41742+
41743 return retval;
41744 }
41745
41746@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41747 goto out1;
41748 }
41749
41750+ if (gr_handle_chroot_pivot()) {
41751+ error = -EPERM;
41752+ path_put(&old);
41753+ goto out1;
41754+ }
41755+
41756 read_lock(&current->fs->lock);
41757 root = current->fs->root;
41758 path_get(&current->fs->root);
41759diff -urNp linux-2.6.32.42/fs/ncpfs/dir.c linux-2.6.32.42/fs/ncpfs/dir.c
41760--- linux-2.6.32.42/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41761+++ linux-2.6.32.42/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41762@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41763 int res, val = 0, len;
41764 __u8 __name[NCP_MAXPATHLEN + 1];
41765
41766+ pax_track_stack();
41767+
41768 parent = dget_parent(dentry);
41769 dir = parent->d_inode;
41770
41771@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41772 int error, res, len;
41773 __u8 __name[NCP_MAXPATHLEN + 1];
41774
41775+ pax_track_stack();
41776+
41777 lock_kernel();
41778 error = -EIO;
41779 if (!ncp_conn_valid(server))
41780@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41781 int error, result, len;
41782 int opmode;
41783 __u8 __name[NCP_MAXPATHLEN + 1];
41784-
41785+
41786 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41787 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41788
41789+ pax_track_stack();
41790+
41791 error = -EIO;
41792 lock_kernel();
41793 if (!ncp_conn_valid(server))
41794@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41795 int error, len;
41796 __u8 __name[NCP_MAXPATHLEN + 1];
41797
41798+ pax_track_stack();
41799+
41800 DPRINTK("ncp_mkdir: making %s/%s\n",
41801 dentry->d_parent->d_name.name, dentry->d_name.name);
41802
41803@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41804 if (!ncp_conn_valid(server))
41805 goto out;
41806
41807+ pax_track_stack();
41808+
41809 ncp_age_dentry(server, dentry);
41810 len = sizeof(__name);
41811 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41812@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41813 int old_len, new_len;
41814 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41815
41816+ pax_track_stack();
41817+
41818 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41819 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41820 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41821diff -urNp linux-2.6.32.42/fs/ncpfs/inode.c linux-2.6.32.42/fs/ncpfs/inode.c
41822--- linux-2.6.32.42/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41823+++ linux-2.6.32.42/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41824@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41825 #endif
41826 struct ncp_entry_info finfo;
41827
41828+ pax_track_stack();
41829+
41830 data.wdog_pid = NULL;
41831 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41832 if (!server)
41833diff -urNp linux-2.6.32.42/fs/nfs/inode.c linux-2.6.32.42/fs/nfs/inode.c
41834--- linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41835+++ linux-2.6.32.42/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
41836@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
41837 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41838 nfsi->attrtimeo_timestamp = jiffies;
41839
41840- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41841+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41842 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41843 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41844 else
41845@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41846 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41847 }
41848
41849-static atomic_long_t nfs_attr_generation_counter;
41850+static atomic_long_unchecked_t nfs_attr_generation_counter;
41851
41852 static unsigned long nfs_read_attr_generation_counter(void)
41853 {
41854- return atomic_long_read(&nfs_attr_generation_counter);
41855+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41856 }
41857
41858 unsigned long nfs_inc_attr_generation_counter(void)
41859 {
41860- return atomic_long_inc_return(&nfs_attr_generation_counter);
41861+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41862 }
41863
41864 void nfs_fattr_init(struct nfs_fattr *fattr)
41865diff -urNp linux-2.6.32.42/fs/nfsd/lockd.c linux-2.6.32.42/fs/nfsd/lockd.c
41866--- linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41867+++ linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41868@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41869 fput(filp);
41870 }
41871
41872-static struct nlmsvc_binding nfsd_nlm_ops = {
41873+static const struct nlmsvc_binding nfsd_nlm_ops = {
41874 .fopen = nlm_fopen, /* open file for locking */
41875 .fclose = nlm_fclose, /* close file */
41876 };
41877diff -urNp linux-2.6.32.42/fs/nfsd/nfs4state.c linux-2.6.32.42/fs/nfsd/nfs4state.c
41878--- linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41879+++ linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41880@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41881 unsigned int cmd;
41882 int err;
41883
41884+ pax_track_stack();
41885+
41886 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41887 (long long) lock->lk_offset,
41888 (long long) lock->lk_length);
41889diff -urNp linux-2.6.32.42/fs/nfsd/nfs4xdr.c linux-2.6.32.42/fs/nfsd/nfs4xdr.c
41890--- linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41891+++ linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41892@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41893 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41894 u32 minorversion = resp->cstate.minorversion;
41895
41896+ pax_track_stack();
41897+
41898 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41899 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41900 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41901diff -urNp linux-2.6.32.42/fs/nfsd/vfs.c linux-2.6.32.42/fs/nfsd/vfs.c
41902--- linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41903+++ linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41904@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41905 } else {
41906 oldfs = get_fs();
41907 set_fs(KERNEL_DS);
41908- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41909+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41910 set_fs(oldfs);
41911 }
41912
41913@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41914
41915 /* Write the data. */
41916 oldfs = get_fs(); set_fs(KERNEL_DS);
41917- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41918+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41919 set_fs(oldfs);
41920 if (host_err < 0)
41921 goto out_nfserr;
41922@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41923 */
41924
41925 oldfs = get_fs(); set_fs(KERNEL_DS);
41926- host_err = inode->i_op->readlink(dentry, buf, *lenp);
41927+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41928 set_fs(oldfs);
41929
41930 if (host_err < 0)
41931diff -urNp linux-2.6.32.42/fs/nilfs2/ioctl.c linux-2.6.32.42/fs/nilfs2/ioctl.c
41932--- linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41933+++ linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41934@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41935 unsigned int cmd, void __user *argp)
41936 {
41937 struct nilfs_argv argv[5];
41938- const static size_t argsz[5] = {
41939+ static const size_t argsz[5] = {
41940 sizeof(struct nilfs_vdesc),
41941 sizeof(struct nilfs_period),
41942 sizeof(__u64),
41943diff -urNp linux-2.6.32.42/fs/notify/dnotify/dnotify.c linux-2.6.32.42/fs/notify/dnotify/dnotify.c
41944--- linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41945+++ linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41946@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41947 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41948 }
41949
41950-static struct fsnotify_ops dnotify_fsnotify_ops = {
41951+static const struct fsnotify_ops dnotify_fsnotify_ops = {
41952 .handle_event = dnotify_handle_event,
41953 .should_send_event = dnotify_should_send_event,
41954 .free_group_priv = NULL,
41955diff -urNp linux-2.6.32.42/fs/notify/notification.c linux-2.6.32.42/fs/notify/notification.c
41956--- linux-2.6.32.42/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41957+++ linux-2.6.32.42/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41958@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41959 * get set to 0 so it will never get 'freed'
41960 */
41961 static struct fsnotify_event q_overflow_event;
41962-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41963+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41964
41965 /**
41966 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41967@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41968 */
41969 u32 fsnotify_get_cookie(void)
41970 {
41971- return atomic_inc_return(&fsnotify_sync_cookie);
41972+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41973 }
41974 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41975
41976diff -urNp linux-2.6.32.42/fs/ntfs/dir.c linux-2.6.32.42/fs/ntfs/dir.c
41977--- linux-2.6.32.42/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41978+++ linux-2.6.32.42/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41979@@ -1328,7 +1328,7 @@ find_next_index_buffer:
41980 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41981 ~(s64)(ndir->itype.index.block_size - 1)));
41982 /* Bounds checks. */
41983- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41984+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41985 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41986 "inode 0x%lx or driver bug.", vdir->i_ino);
41987 goto err_out;
41988diff -urNp linux-2.6.32.42/fs/ntfs/file.c linux-2.6.32.42/fs/ntfs/file.c
41989--- linux-2.6.32.42/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41990+++ linux-2.6.32.42/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41991@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41992 #endif /* NTFS_RW */
41993 };
41994
41995-const struct file_operations ntfs_empty_file_ops = {};
41996+const struct file_operations ntfs_empty_file_ops __read_only;
41997
41998-const struct inode_operations ntfs_empty_inode_ops = {};
41999+const struct inode_operations ntfs_empty_inode_ops __read_only;
42000diff -urNp linux-2.6.32.42/fs/ocfs2/cluster/masklog.c linux-2.6.32.42/fs/ocfs2/cluster/masklog.c
42001--- linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
42002+++ linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
42003@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
42004 return mlog_mask_store(mlog_attr->mask, buf, count);
42005 }
42006
42007-static struct sysfs_ops mlog_attr_ops = {
42008+static const struct sysfs_ops mlog_attr_ops = {
42009 .show = mlog_show,
42010 .store = mlog_store,
42011 };
42012diff -urNp linux-2.6.32.42/fs/ocfs2/localalloc.c linux-2.6.32.42/fs/ocfs2/localalloc.c
42013--- linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
42014+++ linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
42015@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
42016 goto bail;
42017 }
42018
42019- atomic_inc(&osb->alloc_stats.moves);
42020+ atomic_inc_unchecked(&osb->alloc_stats.moves);
42021
42022 status = 0;
42023 bail:
42024diff -urNp linux-2.6.32.42/fs/ocfs2/namei.c linux-2.6.32.42/fs/ocfs2/namei.c
42025--- linux-2.6.32.42/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
42026+++ linux-2.6.32.42/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
42027@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
42028 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
42029 struct ocfs2_dir_lookup_result target_insert = { NULL, };
42030
42031+ pax_track_stack();
42032+
42033 /* At some point it might be nice to break this function up a
42034 * bit. */
42035
42036diff -urNp linux-2.6.32.42/fs/ocfs2/ocfs2.h linux-2.6.32.42/fs/ocfs2/ocfs2.h
42037--- linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
42038+++ linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
42039@@ -217,11 +217,11 @@ enum ocfs2_vol_state
42040
42041 struct ocfs2_alloc_stats
42042 {
42043- atomic_t moves;
42044- atomic_t local_data;
42045- atomic_t bitmap_data;
42046- atomic_t bg_allocs;
42047- atomic_t bg_extends;
42048+ atomic_unchecked_t moves;
42049+ atomic_unchecked_t local_data;
42050+ atomic_unchecked_t bitmap_data;
42051+ atomic_unchecked_t bg_allocs;
42052+ atomic_unchecked_t bg_extends;
42053 };
42054
42055 enum ocfs2_local_alloc_state
42056diff -urNp linux-2.6.32.42/fs/ocfs2/suballoc.c linux-2.6.32.42/fs/ocfs2/suballoc.c
42057--- linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
42058+++ linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
42059@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
42060 mlog_errno(status);
42061 goto bail;
42062 }
42063- atomic_inc(&osb->alloc_stats.bg_extends);
42064+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
42065
42066 /* You should never ask for this much metadata */
42067 BUG_ON(bits_wanted >
42068@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
42069 mlog_errno(status);
42070 goto bail;
42071 }
42072- atomic_inc(&osb->alloc_stats.bg_allocs);
42073+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42074
42075 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
42076 ac->ac_bits_given += (*num_bits);
42077@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
42078 mlog_errno(status);
42079 goto bail;
42080 }
42081- atomic_inc(&osb->alloc_stats.bg_allocs);
42082+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42083
42084 BUG_ON(num_bits != 1);
42085
42086@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42087 cluster_start,
42088 num_clusters);
42089 if (!status)
42090- atomic_inc(&osb->alloc_stats.local_data);
42091+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
42092 } else {
42093 if (min_clusters > (osb->bitmap_cpg - 1)) {
42094 /* The only paths asking for contiguousness
42095@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42096 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
42097 bg_blkno,
42098 bg_bit_off);
42099- atomic_inc(&osb->alloc_stats.bitmap_data);
42100+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
42101 }
42102 }
42103 if (status < 0) {
42104diff -urNp linux-2.6.32.42/fs/ocfs2/super.c linux-2.6.32.42/fs/ocfs2/super.c
42105--- linux-2.6.32.42/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
42106+++ linux-2.6.32.42/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
42107@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
42108 "%10s => GlobalAllocs: %d LocalAllocs: %d "
42109 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
42110 "Stats",
42111- atomic_read(&osb->alloc_stats.bitmap_data),
42112- atomic_read(&osb->alloc_stats.local_data),
42113- atomic_read(&osb->alloc_stats.bg_allocs),
42114- atomic_read(&osb->alloc_stats.moves),
42115- atomic_read(&osb->alloc_stats.bg_extends));
42116+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
42117+ atomic_read_unchecked(&osb->alloc_stats.local_data),
42118+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
42119+ atomic_read_unchecked(&osb->alloc_stats.moves),
42120+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
42121
42122 out += snprintf(buf + out, len - out,
42123 "%10s => State: %u Descriptor: %llu Size: %u bits "
42124@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
42125 spin_lock_init(&osb->osb_xattr_lock);
42126 ocfs2_init_inode_steal_slot(osb);
42127
42128- atomic_set(&osb->alloc_stats.moves, 0);
42129- atomic_set(&osb->alloc_stats.local_data, 0);
42130- atomic_set(&osb->alloc_stats.bitmap_data, 0);
42131- atomic_set(&osb->alloc_stats.bg_allocs, 0);
42132- atomic_set(&osb->alloc_stats.bg_extends, 0);
42133+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
42134+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
42135+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
42136+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
42137+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
42138
42139 /* Copy the blockcheck stats from the superblock probe */
42140 osb->osb_ecc_stats = *stats;
42141diff -urNp linux-2.6.32.42/fs/open.c linux-2.6.32.42/fs/open.c
42142--- linux-2.6.32.42/fs/open.c 2011-03-27 14:31:47.000000000 -0400
42143+++ linux-2.6.32.42/fs/open.c 2011-04-17 15:56:46.000000000 -0400
42144@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
42145 error = locks_verify_truncate(inode, NULL, length);
42146 if (!error)
42147 error = security_path_truncate(&path, length, 0);
42148+
42149+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
42150+ error = -EACCES;
42151+
42152 if (!error) {
42153 vfs_dq_init(inode);
42154 error = do_truncate(path.dentry, length, 0, NULL);
42155@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
42156 if (__mnt_is_readonly(path.mnt))
42157 res = -EROFS;
42158
42159+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
42160+ res = -EACCES;
42161+
42162 out_path_release:
42163 path_put(&path);
42164 out:
42165@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
42166 if (error)
42167 goto dput_and_out;
42168
42169+ gr_log_chdir(path.dentry, path.mnt);
42170+
42171 set_fs_pwd(current->fs, &path);
42172
42173 dput_and_out:
42174@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42175 goto out_putf;
42176
42177 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
42178+
42179+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42180+ error = -EPERM;
42181+
42182+ if (!error)
42183+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42184+
42185 if (!error)
42186 set_fs_pwd(current->fs, &file->f_path);
42187 out_putf:
42188@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
42189 if (!capable(CAP_SYS_CHROOT))
42190 goto dput_and_out;
42191
42192+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42193+ goto dput_and_out;
42194+
42195+ if (gr_handle_chroot_caps(&path)) {
42196+ error = -ENOMEM;
42197+ goto dput_and_out;
42198+ }
42199+
42200 set_fs_root(current->fs, &path);
42201+
42202+ gr_handle_chroot_chdir(&path);
42203+
42204 error = 0;
42205 dput_and_out:
42206 path_put(&path);
42207@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42208 err = mnt_want_write_file(file);
42209 if (err)
42210 goto out_putf;
42211+
42212 mutex_lock(&inode->i_mutex);
42213+
42214+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
42215+ err = -EACCES;
42216+ goto out_unlock;
42217+ }
42218+
42219 if (mode == (mode_t) -1)
42220 mode = inode->i_mode;
42221+
42222+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
42223+ err = -EPERM;
42224+ goto out_unlock;
42225+ }
42226+
42227 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42228 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42229 err = notify_change(dentry, &newattrs);
42230+
42231+out_unlock:
42232 mutex_unlock(&inode->i_mutex);
42233 mnt_drop_write(file->f_path.mnt);
42234 out_putf:
42235@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42236 error = mnt_want_write(path.mnt);
42237 if (error)
42238 goto dput_and_out;
42239+
42240 mutex_lock(&inode->i_mutex);
42241+
42242+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42243+ error = -EACCES;
42244+ goto out_unlock;
42245+ }
42246+
42247 if (mode == (mode_t) -1)
42248 mode = inode->i_mode;
42249+
42250+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42251+ error = -EACCES;
42252+ goto out_unlock;
42253+ }
42254+
42255 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42256 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42257 error = notify_change(path.dentry, &newattrs);
42258+
42259+out_unlock:
42260 mutex_unlock(&inode->i_mutex);
42261 mnt_drop_write(path.mnt);
42262 dput_and_out:
42263@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
42264 return sys_fchmodat(AT_FDCWD, filename, mode);
42265 }
42266
42267-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
42268+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
42269 {
42270 struct inode *inode = dentry->d_inode;
42271 int error;
42272 struct iattr newattrs;
42273
42274+ if (!gr_acl_handle_chown(dentry, mnt))
42275+ return -EACCES;
42276+
42277 newattrs.ia_valid = ATTR_CTIME;
42278 if (user != (uid_t) -1) {
42279 newattrs.ia_valid |= ATTR_UID;
42280@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
42281 error = mnt_want_write(path.mnt);
42282 if (error)
42283 goto out_release;
42284- error = chown_common(path.dentry, user, group);
42285+ error = chown_common(path.dentry, user, group, path.mnt);
42286 mnt_drop_write(path.mnt);
42287 out_release:
42288 path_put(&path);
42289@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
42290 error = mnt_want_write(path.mnt);
42291 if (error)
42292 goto out_release;
42293- error = chown_common(path.dentry, user, group);
42294+ error = chown_common(path.dentry, user, group, path.mnt);
42295 mnt_drop_write(path.mnt);
42296 out_release:
42297 path_put(&path);
42298@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
42299 error = mnt_want_write(path.mnt);
42300 if (error)
42301 goto out_release;
42302- error = chown_common(path.dentry, user, group);
42303+ error = chown_common(path.dentry, user, group, path.mnt);
42304 mnt_drop_write(path.mnt);
42305 out_release:
42306 path_put(&path);
42307@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
42308 goto out_fput;
42309 dentry = file->f_path.dentry;
42310 audit_inode(NULL, dentry);
42311- error = chown_common(dentry, user, group);
42312+ error = chown_common(dentry, user, group, file->f_path.mnt);
42313 mnt_drop_write(file->f_path.mnt);
42314 out_fput:
42315 fput(file);
42316@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
42317 if (!IS_ERR(tmp)) {
42318 fd = get_unused_fd_flags(flags);
42319 if (fd >= 0) {
42320- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
42321+ struct file *f;
42322+ /* don't allow to be set by userland */
42323+ flags &= ~FMODE_GREXEC;
42324+ f = do_filp_open(dfd, tmp, flags, mode, 0);
42325 if (IS_ERR(f)) {
42326 put_unused_fd(fd);
42327 fd = PTR_ERR(f);
42328diff -urNp linux-2.6.32.42/fs/partitions/ldm.c linux-2.6.32.42/fs/partitions/ldm.c
42329--- linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
42330+++ linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
42331@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42332 ldm_error ("A VBLK claims to have %d parts.", num);
42333 return false;
42334 }
42335+
42336 if (rec >= num) {
42337 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42338 return false;
42339@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42340 goto found;
42341 }
42342
42343- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42344+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42345 if (!f) {
42346 ldm_crit ("Out of memory.");
42347 return false;
42348diff -urNp linux-2.6.32.42/fs/partitions/mac.c linux-2.6.32.42/fs/partitions/mac.c
42349--- linux-2.6.32.42/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
42350+++ linux-2.6.32.42/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
42351@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
42352 return 0; /* not a MacOS disk */
42353 }
42354 blocks_in_map = be32_to_cpu(part->map_count);
42355+ printk(" [mac]");
42356 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
42357 put_dev_sector(sect);
42358 return 0;
42359 }
42360- printk(" [mac]");
42361 for (slot = 1; slot <= blocks_in_map; ++slot) {
42362 int pos = slot * secsize;
42363 put_dev_sector(sect);
42364diff -urNp linux-2.6.32.42/fs/pipe.c linux-2.6.32.42/fs/pipe.c
42365--- linux-2.6.32.42/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
42366+++ linux-2.6.32.42/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
42367@@ -401,9 +401,9 @@ redo:
42368 }
42369 if (bufs) /* More to do? */
42370 continue;
42371- if (!pipe->writers)
42372+ if (!atomic_read(&pipe->writers))
42373 break;
42374- if (!pipe->waiting_writers) {
42375+ if (!atomic_read(&pipe->waiting_writers)) {
42376 /* syscall merging: Usually we must not sleep
42377 * if O_NONBLOCK is set, or if we got some data.
42378 * But if a writer sleeps in kernel space, then
42379@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
42380 mutex_lock(&inode->i_mutex);
42381 pipe = inode->i_pipe;
42382
42383- if (!pipe->readers) {
42384+ if (!atomic_read(&pipe->readers)) {
42385 send_sig(SIGPIPE, current, 0);
42386 ret = -EPIPE;
42387 goto out;
42388@@ -511,7 +511,7 @@ redo1:
42389 for (;;) {
42390 int bufs;
42391
42392- if (!pipe->readers) {
42393+ if (!atomic_read(&pipe->readers)) {
42394 send_sig(SIGPIPE, current, 0);
42395 if (!ret)
42396 ret = -EPIPE;
42397@@ -597,9 +597,9 @@ redo2:
42398 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42399 do_wakeup = 0;
42400 }
42401- pipe->waiting_writers++;
42402+ atomic_inc(&pipe->waiting_writers);
42403 pipe_wait(pipe);
42404- pipe->waiting_writers--;
42405+ atomic_dec(&pipe->waiting_writers);
42406 }
42407 out:
42408 mutex_unlock(&inode->i_mutex);
42409@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
42410 mask = 0;
42411 if (filp->f_mode & FMODE_READ) {
42412 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42413- if (!pipe->writers && filp->f_version != pipe->w_counter)
42414+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42415 mask |= POLLHUP;
42416 }
42417
42418@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
42419 * Most Unices do not set POLLERR for FIFOs but on Linux they
42420 * behave exactly like pipes for poll().
42421 */
42422- if (!pipe->readers)
42423+ if (!atomic_read(&pipe->readers))
42424 mask |= POLLERR;
42425 }
42426
42427@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
42428
42429 mutex_lock(&inode->i_mutex);
42430 pipe = inode->i_pipe;
42431- pipe->readers -= decr;
42432- pipe->writers -= decw;
42433+ atomic_sub(decr, &pipe->readers);
42434+ atomic_sub(decw, &pipe->writers);
42435
42436- if (!pipe->readers && !pipe->writers) {
42437+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42438 free_pipe_info(inode);
42439 } else {
42440 wake_up_interruptible_sync(&pipe->wait);
42441@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
42442
42443 if (inode->i_pipe) {
42444 ret = 0;
42445- inode->i_pipe->readers++;
42446+ atomic_inc(&inode->i_pipe->readers);
42447 }
42448
42449 mutex_unlock(&inode->i_mutex);
42450@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
42451
42452 if (inode->i_pipe) {
42453 ret = 0;
42454- inode->i_pipe->writers++;
42455+ atomic_inc(&inode->i_pipe->writers);
42456 }
42457
42458 mutex_unlock(&inode->i_mutex);
42459@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
42460 if (inode->i_pipe) {
42461 ret = 0;
42462 if (filp->f_mode & FMODE_READ)
42463- inode->i_pipe->readers++;
42464+ atomic_inc(&inode->i_pipe->readers);
42465 if (filp->f_mode & FMODE_WRITE)
42466- inode->i_pipe->writers++;
42467+ atomic_inc(&inode->i_pipe->writers);
42468 }
42469
42470 mutex_unlock(&inode->i_mutex);
42471@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
42472 inode->i_pipe = NULL;
42473 }
42474
42475-static struct vfsmount *pipe_mnt __read_mostly;
42476+struct vfsmount *pipe_mnt __read_mostly;
42477 static int pipefs_delete_dentry(struct dentry *dentry)
42478 {
42479 /*
42480@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
42481 goto fail_iput;
42482 inode->i_pipe = pipe;
42483
42484- pipe->readers = pipe->writers = 1;
42485+ atomic_set(&pipe->readers, 1);
42486+ atomic_set(&pipe->writers, 1);
42487 inode->i_fop = &rdwr_pipefifo_fops;
42488
42489 /*
42490diff -urNp linux-2.6.32.42/fs/proc/array.c linux-2.6.32.42/fs/proc/array.c
42491--- linux-2.6.32.42/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
42492+++ linux-2.6.32.42/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
42493@@ -60,6 +60,7 @@
42494 #include <linux/tty.h>
42495 #include <linux/string.h>
42496 #include <linux/mman.h>
42497+#include <linux/grsecurity.h>
42498 #include <linux/proc_fs.h>
42499 #include <linux/ioport.h>
42500 #include <linux/uaccess.h>
42501@@ -321,6 +322,21 @@ static inline void task_context_switch_c
42502 p->nivcsw);
42503 }
42504
42505+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42506+static inline void task_pax(struct seq_file *m, struct task_struct *p)
42507+{
42508+ if (p->mm)
42509+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42510+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42511+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42512+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42513+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42514+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42515+ else
42516+ seq_printf(m, "PaX:\t-----\n");
42517+}
42518+#endif
42519+
42520 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42521 struct pid *pid, struct task_struct *task)
42522 {
42523@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
42524 task_cap(m, task);
42525 cpuset_task_status_allowed(m, task);
42526 task_context_switch_counts(m, task);
42527+
42528+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42529+ task_pax(m, task);
42530+#endif
42531+
42532+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42533+ task_grsec_rbac(m, task);
42534+#endif
42535+
42536 return 0;
42537 }
42538
42539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42540+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42541+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42542+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42543+#endif
42544+
42545 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42546 struct pid *pid, struct task_struct *task, int whole)
42547 {
42548@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
42549 cputime_t cutime, cstime, utime, stime;
42550 cputime_t cgtime, gtime;
42551 unsigned long rsslim = 0;
42552- char tcomm[sizeof(task->comm)];
42553+ char tcomm[sizeof(task->comm)] = { 0 };
42554 unsigned long flags;
42555
42556+ pax_track_stack();
42557+
42558 state = *get_task_state(task);
42559 vsize = eip = esp = 0;
42560 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42561@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
42562 gtime = task_gtime(task);
42563 }
42564
42565+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42566+ if (PAX_RAND_FLAGS(mm)) {
42567+ eip = 0;
42568+ esp = 0;
42569+ wchan = 0;
42570+ }
42571+#endif
42572+#ifdef CONFIG_GRKERNSEC_HIDESYM
42573+ wchan = 0;
42574+ eip =0;
42575+ esp =0;
42576+#endif
42577+
42578 /* scale priority and nice values from timeslices to -20..20 */
42579 /* to make it look like a "normal" Unix priority/nice value */
42580 priority = task_prio(task);
42581@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
42582 vsize,
42583 mm ? get_mm_rss(mm) : 0,
42584 rsslim,
42585+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42586+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42587+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42588+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42589+#else
42590 mm ? (permitted ? mm->start_code : 1) : 0,
42591 mm ? (permitted ? mm->end_code : 1) : 0,
42592 (permitted && mm) ? mm->start_stack : 0,
42593+#endif
42594 esp,
42595 eip,
42596 /* The signal information here is obsolete.
42597@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
42598
42599 return 0;
42600 }
42601+
42602+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42603+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42604+{
42605+ u32 curr_ip = 0;
42606+ unsigned long flags;
42607+
42608+ if (lock_task_sighand(task, &flags)) {
42609+ curr_ip = task->signal->curr_ip;
42610+ unlock_task_sighand(task, &flags);
42611+ }
42612+
42613+ return sprintf(buffer, "%pI4\n", &curr_ip);
42614+}
42615+#endif
42616diff -urNp linux-2.6.32.42/fs/proc/base.c linux-2.6.32.42/fs/proc/base.c
42617--- linux-2.6.32.42/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
42618+++ linux-2.6.32.42/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
42619@@ -102,6 +102,22 @@ struct pid_entry {
42620 union proc_op op;
42621 };
42622
42623+struct getdents_callback {
42624+ struct linux_dirent __user * current_dir;
42625+ struct linux_dirent __user * previous;
42626+ struct file * file;
42627+ int count;
42628+ int error;
42629+};
42630+
42631+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42632+ loff_t offset, u64 ino, unsigned int d_type)
42633+{
42634+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
42635+ buf->error = -EINVAL;
42636+ return 0;
42637+}
42638+
42639 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42640 .name = (NAME), \
42641 .len = sizeof(NAME) - 1, \
42642@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
42643 if (task == current)
42644 return 0;
42645
42646+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42647+ return -EPERM;
42648+
42649 /*
42650 * If current is actively ptrace'ing, and would also be
42651 * permitted to freshly attach with ptrace now, permit it.
42652@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
42653 if (!mm->arg_end)
42654 goto out_mm; /* Shh! No looking before we're done */
42655
42656+ if (gr_acl_handle_procpidmem(task))
42657+ goto out_mm;
42658+
42659 len = mm->arg_end - mm->arg_start;
42660
42661 if (len > PAGE_SIZE)
42662@@ -287,12 +309,28 @@ out:
42663 return res;
42664 }
42665
42666+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42667+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42668+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42669+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42670+#endif
42671+
42672 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42673 {
42674 int res = 0;
42675 struct mm_struct *mm = get_task_mm(task);
42676 if (mm) {
42677 unsigned int nwords = 0;
42678+
42679+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42680+ /* allow if we're currently ptracing this task */
42681+ if (PAX_RAND_FLAGS(mm) &&
42682+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42683+ mmput(mm);
42684+ return res;
42685+ }
42686+#endif
42687+
42688 do {
42689 nwords += 2;
42690 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42691@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42692 }
42693
42694
42695-#ifdef CONFIG_KALLSYMS
42696+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42697 /*
42698 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42699 * Returns the resolved symbol. If that fails, simply return the address.
42700@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42701 }
42702 #endif /* CONFIG_KALLSYMS */
42703
42704-#ifdef CONFIG_STACKTRACE
42705+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42706
42707 #define MAX_STACK_TRACE_DEPTH 64
42708
42709@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42710 return count;
42711 }
42712
42713-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42714+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42715 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42716 {
42717 long nr;
42718@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42719 /************************************************************************/
42720
42721 /* permission checks */
42722-static int proc_fd_access_allowed(struct inode *inode)
42723+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42724 {
42725 struct task_struct *task;
42726 int allowed = 0;
42727@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42728 */
42729 task = get_proc_task(inode);
42730 if (task) {
42731- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42732+ if (log)
42733+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42734+ else
42735+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42736 put_task_struct(task);
42737 }
42738 return allowed;
42739@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42740 if (!task)
42741 goto out_no_task;
42742
42743+ if (gr_acl_handle_procpidmem(task))
42744+ goto out;
42745+
42746 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42747 goto out;
42748
42749@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42750 path_put(&nd->path);
42751
42752 /* Are we allowed to snoop on the tasks file descriptors? */
42753- if (!proc_fd_access_allowed(inode))
42754+ if (!proc_fd_access_allowed(inode,0))
42755 goto out;
42756
42757 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42758@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42759 struct path path;
42760
42761 /* Are we allowed to snoop on the tasks file descriptors? */
42762- if (!proc_fd_access_allowed(inode))
42763- goto out;
42764+ /* logging this is needed for learning on chromium to work properly,
42765+ but we don't want to flood the logs from 'ps' which does a readlink
42766+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42767+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
42768+ */
42769+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42770+ if (!proc_fd_access_allowed(inode,0))
42771+ goto out;
42772+ } else {
42773+ if (!proc_fd_access_allowed(inode,1))
42774+ goto out;
42775+ }
42776
42777 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42778 if (error)
42779@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42780 rcu_read_lock();
42781 cred = __task_cred(task);
42782 inode->i_uid = cred->euid;
42783+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42784+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42785+#else
42786 inode->i_gid = cred->egid;
42787+#endif
42788 rcu_read_unlock();
42789 }
42790 security_task_to_inode(task, inode);
42791@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42792 struct inode *inode = dentry->d_inode;
42793 struct task_struct *task;
42794 const struct cred *cred;
42795+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42796+ const struct cred *tmpcred = current_cred();
42797+#endif
42798
42799 generic_fillattr(inode, stat);
42800
42801@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42802 stat->uid = 0;
42803 stat->gid = 0;
42804 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42805+
42806+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42807+ rcu_read_unlock();
42808+ return -ENOENT;
42809+ }
42810+
42811 if (task) {
42812+ cred = __task_cred(task);
42813+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42814+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42815+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42816+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42817+#endif
42818+ ) {
42819+#endif
42820 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42821+#ifdef CONFIG_GRKERNSEC_PROC_USER
42822+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42823+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42824+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42825+#endif
42826 task_dumpable(task)) {
42827- cred = __task_cred(task);
42828 stat->uid = cred->euid;
42829+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42830+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42831+#else
42832 stat->gid = cred->egid;
42833+#endif
42834 }
42835+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42836+ } else {
42837+ rcu_read_unlock();
42838+ return -ENOENT;
42839+ }
42840+#endif
42841 }
42842 rcu_read_unlock();
42843 return 0;
42844@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42845
42846 if (task) {
42847 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42848+#ifdef CONFIG_GRKERNSEC_PROC_USER
42849+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42850+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42851+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42852+#endif
42853 task_dumpable(task)) {
42854 rcu_read_lock();
42855 cred = __task_cred(task);
42856 inode->i_uid = cred->euid;
42857+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42858+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42859+#else
42860 inode->i_gid = cred->egid;
42861+#endif
42862 rcu_read_unlock();
42863 } else {
42864 inode->i_uid = 0;
42865@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42866 int fd = proc_fd(inode);
42867
42868 if (task) {
42869- files = get_files_struct(task);
42870+ if (!gr_acl_handle_procpidmem(task))
42871+ files = get_files_struct(task);
42872 put_task_struct(task);
42873 }
42874 if (files) {
42875@@ -1895,12 +1994,22 @@ static const struct file_operations proc
42876 static int proc_fd_permission(struct inode *inode, int mask)
42877 {
42878 int rv;
42879+ struct task_struct *task;
42880
42881 rv = generic_permission(inode, mask, NULL);
42882- if (rv == 0)
42883- return 0;
42884+
42885 if (task_pid(current) == proc_pid(inode))
42886 rv = 0;
42887+
42888+ task = get_proc_task(inode);
42889+ if (task == NULL)
42890+ return rv;
42891+
42892+ if (gr_acl_handle_procpidmem(task))
42893+ rv = -EACCES;
42894+
42895+ put_task_struct(task);
42896+
42897 return rv;
42898 }
42899
42900@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42901 if (!task)
42902 goto out_no_task;
42903
42904+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42905+ goto out;
42906+
42907 /*
42908 * Yes, it does not scale. And it should not. Don't add
42909 * new entries into /proc/<tgid>/ without very good reasons.
42910@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42911 if (!task)
42912 goto out_no_task;
42913
42914+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42915+ goto out;
42916+
42917 ret = 0;
42918 i = filp->f_pos;
42919 switch (i) {
42920@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42921 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42922 void *cookie)
42923 {
42924- char *s = nd_get_link(nd);
42925+ const char *s = nd_get_link(nd);
42926 if (!IS_ERR(s))
42927 __putname(s);
42928 }
42929@@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42930 #ifdef CONFIG_SCHED_DEBUG
42931 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42932 #endif
42933-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42934+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42935 INF("syscall", S_IRUSR, proc_pid_syscall),
42936 #endif
42937 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42938@@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42939 #ifdef CONFIG_SECURITY
42940 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42941 #endif
42942-#ifdef CONFIG_KALLSYMS
42943+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42944 INF("wchan", S_IRUGO, proc_pid_wchan),
42945 #endif
42946-#ifdef CONFIG_STACKTRACE
42947+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42948 ONE("stack", S_IRUSR, proc_pid_stack),
42949 #endif
42950 #ifdef CONFIG_SCHEDSTATS
42951@@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42952 #ifdef CONFIG_TASK_IO_ACCOUNTING
42953 INF("io", S_IRUGO, proc_tgid_io_accounting),
42954 #endif
42955+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42956+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42957+#endif
42958 };
42959
42960 static int proc_tgid_base_readdir(struct file * filp,
42961@@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42962 if (!inode)
42963 goto out;
42964
42965+#ifdef CONFIG_GRKERNSEC_PROC_USER
42966+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42967+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42968+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42969+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42970+#else
42971 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42972+#endif
42973 inode->i_op = &proc_tgid_base_inode_operations;
42974 inode->i_fop = &proc_tgid_base_operations;
42975 inode->i_flags|=S_IMMUTABLE;
42976@@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42977 if (!task)
42978 goto out;
42979
42980+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42981+ goto out_put_task;
42982+
42983 result = proc_pid_instantiate(dir, dentry, task, NULL);
42984+out_put_task:
42985 put_task_struct(task);
42986 out:
42987 return result;
42988@@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42989 {
42990 unsigned int nr;
42991 struct task_struct *reaper;
42992+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42993+ const struct cred *tmpcred = current_cred();
42994+ const struct cred *itercred;
42995+#endif
42996+ filldir_t __filldir = filldir;
42997 struct tgid_iter iter;
42998 struct pid_namespace *ns;
42999
43000@@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
43001 for (iter = next_tgid(ns, iter);
43002 iter.task;
43003 iter.tgid += 1, iter = next_tgid(ns, iter)) {
43004+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43005+ rcu_read_lock();
43006+ itercred = __task_cred(iter.task);
43007+#endif
43008+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
43009+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43010+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
43011+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43012+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43013+#endif
43014+ )
43015+#endif
43016+ )
43017+ __filldir = &gr_fake_filldir;
43018+ else
43019+ __filldir = filldir;
43020+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43021+ rcu_read_unlock();
43022+#endif
43023 filp->f_pos = iter.tgid + TGID_OFFSET;
43024- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
43025+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
43026 put_task_struct(iter.task);
43027 goto out;
43028 }
43029@@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
43030 #ifdef CONFIG_SCHED_DEBUG
43031 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
43032 #endif
43033-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43034+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43035 INF("syscall", S_IRUSR, proc_pid_syscall),
43036 #endif
43037 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43038@@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
43039 #ifdef CONFIG_SECURITY
43040 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43041 #endif
43042-#ifdef CONFIG_KALLSYMS
43043+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43044 INF("wchan", S_IRUGO, proc_pid_wchan),
43045 #endif
43046-#ifdef CONFIG_STACKTRACE
43047+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43048 ONE("stack", S_IRUSR, proc_pid_stack),
43049 #endif
43050 #ifdef CONFIG_SCHEDSTATS
43051diff -urNp linux-2.6.32.42/fs/proc/cmdline.c linux-2.6.32.42/fs/proc/cmdline.c
43052--- linux-2.6.32.42/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
43053+++ linux-2.6.32.42/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
43054@@ -23,7 +23,11 @@ static const struct file_operations cmdl
43055
43056 static int __init proc_cmdline_init(void)
43057 {
43058+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43059+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
43060+#else
43061 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
43062+#endif
43063 return 0;
43064 }
43065 module_init(proc_cmdline_init);
43066diff -urNp linux-2.6.32.42/fs/proc/devices.c linux-2.6.32.42/fs/proc/devices.c
43067--- linux-2.6.32.42/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
43068+++ linux-2.6.32.42/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
43069@@ -64,7 +64,11 @@ static const struct file_operations proc
43070
43071 static int __init proc_devices_init(void)
43072 {
43073+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43074+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
43075+#else
43076 proc_create("devices", 0, NULL, &proc_devinfo_operations);
43077+#endif
43078 return 0;
43079 }
43080 module_init(proc_devices_init);
43081diff -urNp linux-2.6.32.42/fs/proc/inode.c linux-2.6.32.42/fs/proc/inode.c
43082--- linux-2.6.32.42/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
43083+++ linux-2.6.32.42/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
43084@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
43085 if (de->mode) {
43086 inode->i_mode = de->mode;
43087 inode->i_uid = de->uid;
43088+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43089+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43090+#else
43091 inode->i_gid = de->gid;
43092+#endif
43093 }
43094 if (de->size)
43095 inode->i_size = de->size;
43096diff -urNp linux-2.6.32.42/fs/proc/internal.h linux-2.6.32.42/fs/proc/internal.h
43097--- linux-2.6.32.42/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
43098+++ linux-2.6.32.42/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
43099@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
43100 struct pid *pid, struct task_struct *task);
43101 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
43102 struct pid *pid, struct task_struct *task);
43103+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43104+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
43105+#endif
43106 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
43107
43108 extern const struct file_operations proc_maps_operations;
43109diff -urNp linux-2.6.32.42/fs/proc/Kconfig linux-2.6.32.42/fs/proc/Kconfig
43110--- linux-2.6.32.42/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
43111+++ linux-2.6.32.42/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
43112@@ -30,12 +30,12 @@ config PROC_FS
43113
43114 config PROC_KCORE
43115 bool "/proc/kcore support" if !ARM
43116- depends on PROC_FS && MMU
43117+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
43118
43119 config PROC_VMCORE
43120 bool "/proc/vmcore support (EXPERIMENTAL)"
43121- depends on PROC_FS && CRASH_DUMP
43122- default y
43123+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
43124+ default n
43125 help
43126 Exports the dump image of crashed kernel in ELF format.
43127
43128@@ -59,8 +59,8 @@ config PROC_SYSCTL
43129 limited in memory.
43130
43131 config PROC_PAGE_MONITOR
43132- default y
43133- depends on PROC_FS && MMU
43134+ default n
43135+ depends on PROC_FS && MMU && !GRKERNSEC
43136 bool "Enable /proc page monitoring" if EMBEDDED
43137 help
43138 Various /proc files exist to monitor process memory utilization:
43139diff -urNp linux-2.6.32.42/fs/proc/kcore.c linux-2.6.32.42/fs/proc/kcore.c
43140--- linux-2.6.32.42/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
43141+++ linux-2.6.32.42/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
43142@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
43143 off_t offset = 0;
43144 struct kcore_list *m;
43145
43146+ pax_track_stack();
43147+
43148 /* setup ELF header */
43149 elf = (struct elfhdr *) bufp;
43150 bufp += sizeof(struct elfhdr);
43151@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
43152 * the addresses in the elf_phdr on our list.
43153 */
43154 start = kc_offset_to_vaddr(*fpos - elf_buflen);
43155- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
43156+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
43157+ if (tsz > buflen)
43158 tsz = buflen;
43159-
43160+
43161 while (buflen) {
43162 struct kcore_list *m;
43163
43164@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
43165 kfree(elf_buf);
43166 } else {
43167 if (kern_addr_valid(start)) {
43168- unsigned long n;
43169+ char *elf_buf;
43170+ mm_segment_t oldfs;
43171
43172- n = copy_to_user(buffer, (char *)start, tsz);
43173- /*
43174- * We cannot distingush between fault on source
43175- * and fault on destination. When this happens
43176- * we clear too and hope it will trigger the
43177- * EFAULT again.
43178- */
43179- if (n) {
43180- if (clear_user(buffer + tsz - n,
43181- n))
43182+ elf_buf = kmalloc(tsz, GFP_KERNEL);
43183+ if (!elf_buf)
43184+ return -ENOMEM;
43185+ oldfs = get_fs();
43186+ set_fs(KERNEL_DS);
43187+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43188+ set_fs(oldfs);
43189+ if (copy_to_user(buffer, elf_buf, tsz)) {
43190+ kfree(elf_buf);
43191 return -EFAULT;
43192+ }
43193 }
43194+ set_fs(oldfs);
43195+ kfree(elf_buf);
43196 } else {
43197 if (clear_user(buffer, tsz))
43198 return -EFAULT;
43199@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
43200
43201 static int open_kcore(struct inode *inode, struct file *filp)
43202 {
43203+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43204+ return -EPERM;
43205+#endif
43206 if (!capable(CAP_SYS_RAWIO))
43207 return -EPERM;
43208 if (kcore_need_update)
43209diff -urNp linux-2.6.32.42/fs/proc/meminfo.c linux-2.6.32.42/fs/proc/meminfo.c
43210--- linux-2.6.32.42/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
43211+++ linux-2.6.32.42/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
43212@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43213 unsigned long pages[NR_LRU_LISTS];
43214 int lru;
43215
43216+ pax_track_stack();
43217+
43218 /*
43219 * display in kilobytes.
43220 */
43221@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
43222 vmi.used >> 10,
43223 vmi.largest_chunk >> 10
43224 #ifdef CONFIG_MEMORY_FAILURE
43225- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43226+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43227 #endif
43228 );
43229
43230diff -urNp linux-2.6.32.42/fs/proc/nommu.c linux-2.6.32.42/fs/proc/nommu.c
43231--- linux-2.6.32.42/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
43232+++ linux-2.6.32.42/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
43233@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
43234 if (len < 1)
43235 len = 1;
43236 seq_printf(m, "%*c", len, ' ');
43237- seq_path(m, &file->f_path, "");
43238+ seq_path(m, &file->f_path, "\n\\");
43239 }
43240
43241 seq_putc(m, '\n');
43242diff -urNp linux-2.6.32.42/fs/proc/proc_net.c linux-2.6.32.42/fs/proc/proc_net.c
43243--- linux-2.6.32.42/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
43244+++ linux-2.6.32.42/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
43245@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
43246 struct task_struct *task;
43247 struct nsproxy *ns;
43248 struct net *net = NULL;
43249+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43250+ const struct cred *cred = current_cred();
43251+#endif
43252+
43253+#ifdef CONFIG_GRKERNSEC_PROC_USER
43254+ if (cred->fsuid)
43255+ return net;
43256+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43257+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43258+ return net;
43259+#endif
43260
43261 rcu_read_lock();
43262 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43263diff -urNp linux-2.6.32.42/fs/proc/proc_sysctl.c linux-2.6.32.42/fs/proc/proc_sysctl.c
43264--- linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
43265+++ linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
43266@@ -7,6 +7,8 @@
43267 #include <linux/security.h>
43268 #include "internal.h"
43269
43270+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43271+
43272 static const struct dentry_operations proc_sys_dentry_operations;
43273 static const struct file_operations proc_sys_file_operations;
43274 static const struct inode_operations proc_sys_inode_operations;
43275@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
43276 if (!p)
43277 goto out;
43278
43279+ if (gr_handle_sysctl(p, MAY_EXEC))
43280+ goto out;
43281+
43282 err = ERR_PTR(-ENOMEM);
43283 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43284 if (h)
43285@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
43286 if (*pos < file->f_pos)
43287 continue;
43288
43289+ if (gr_handle_sysctl(table, 0))
43290+ continue;
43291+
43292 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43293 if (res)
43294 return res;
43295@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
43296 if (IS_ERR(head))
43297 return PTR_ERR(head);
43298
43299+ if (table && gr_handle_sysctl(table, MAY_EXEC))
43300+ return -ENOENT;
43301+
43302 generic_fillattr(inode, stat);
43303 if (table)
43304 stat->mode = (stat->mode & S_IFMT) | table->mode;
43305diff -urNp linux-2.6.32.42/fs/proc/root.c linux-2.6.32.42/fs/proc/root.c
43306--- linux-2.6.32.42/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
43307+++ linux-2.6.32.42/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
43308@@ -134,7 +134,15 @@ void __init proc_root_init(void)
43309 #ifdef CONFIG_PROC_DEVICETREE
43310 proc_device_tree_init();
43311 #endif
43312+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43313+#ifdef CONFIG_GRKERNSEC_PROC_USER
43314+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43315+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43316+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43317+#endif
43318+#else
43319 proc_mkdir("bus", NULL);
43320+#endif
43321 proc_sys_init();
43322 }
43323
43324diff -urNp linux-2.6.32.42/fs/proc/task_mmu.c linux-2.6.32.42/fs/proc/task_mmu.c
43325--- linux-2.6.32.42/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
43326+++ linux-2.6.32.42/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
43327@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
43328 "VmStk:\t%8lu kB\n"
43329 "VmExe:\t%8lu kB\n"
43330 "VmLib:\t%8lu kB\n"
43331- "VmPTE:\t%8lu kB\n",
43332- hiwater_vm << (PAGE_SHIFT-10),
43333+ "VmPTE:\t%8lu kB\n"
43334+
43335+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43336+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43337+#endif
43338+
43339+ ,hiwater_vm << (PAGE_SHIFT-10),
43340 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43341 mm->locked_vm << (PAGE_SHIFT-10),
43342 hiwater_rss << (PAGE_SHIFT-10),
43343 total_rss << (PAGE_SHIFT-10),
43344 data << (PAGE_SHIFT-10),
43345 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43346- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
43347+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
43348+
43349+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43350+ , mm->context.user_cs_base, mm->context.user_cs_limit
43351+#endif
43352+
43353+ );
43354 }
43355
43356 unsigned long task_vsize(struct mm_struct *mm)
43357@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
43358 struct proc_maps_private *priv = m->private;
43359 struct vm_area_struct *vma = v;
43360
43361- vma_stop(priv, vma);
43362+ if (!IS_ERR(vma))
43363+ vma_stop(priv, vma);
43364 if (priv->task)
43365 put_task_struct(priv->task);
43366 }
43367@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
43368 return ret;
43369 }
43370
43371+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43372+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43373+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43374+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43375+#endif
43376+
43377 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43378 {
43379 struct mm_struct *mm = vma->vm_mm;
43380@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
43381 int flags = vma->vm_flags;
43382 unsigned long ino = 0;
43383 unsigned long long pgoff = 0;
43384- unsigned long start;
43385 dev_t dev = 0;
43386 int len;
43387
43388@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
43389 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43390 }
43391
43392- /* We don't show the stack guard page in /proc/maps */
43393- start = vma->vm_start;
43394- if (vma->vm_flags & VM_GROWSDOWN)
43395- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
43396- start += PAGE_SIZE;
43397-
43398 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43399- start,
43400+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43401+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
43402+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
43403+#else
43404+ vma->vm_start,
43405 vma->vm_end,
43406+#endif
43407 flags & VM_READ ? 'r' : '-',
43408 flags & VM_WRITE ? 'w' : '-',
43409 flags & VM_EXEC ? 'x' : '-',
43410 flags & VM_MAYSHARE ? 's' : 'p',
43411+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43412+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43413+#else
43414 pgoff,
43415+#endif
43416 MAJOR(dev), MINOR(dev), ino, &len);
43417
43418 /*
43419@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
43420 */
43421 if (file) {
43422 pad_len_spaces(m, len);
43423- seq_path(m, &file->f_path, "\n");
43424+ seq_path(m, &file->f_path, "\n\\");
43425 } else {
43426 const char *name = arch_vma_name(vma);
43427 if (!name) {
43428@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
43429 if (vma->vm_start <= mm->brk &&
43430 vma->vm_end >= mm->start_brk) {
43431 name = "[heap]";
43432- } else if (vma->vm_start <= mm->start_stack &&
43433- vma->vm_end >= mm->start_stack) {
43434+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43435+ (vma->vm_start <= mm->start_stack &&
43436+ vma->vm_end >= mm->start_stack)) {
43437 name = "[stack]";
43438 }
43439 } else {
43440@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
43441 };
43442
43443 memset(&mss, 0, sizeof mss);
43444- mss.vma = vma;
43445- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43446- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43447+
43448+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43449+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43450+#endif
43451+ mss.vma = vma;
43452+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43453+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43454+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43455+ }
43456+#endif
43457
43458 show_map_vma(m, vma);
43459
43460@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
43461 "Swap: %8lu kB\n"
43462 "KernelPageSize: %8lu kB\n"
43463 "MMUPageSize: %8lu kB\n",
43464+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43465+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43466+#else
43467 (vma->vm_end - vma->vm_start) >> 10,
43468+#endif
43469 mss.resident >> 10,
43470 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43471 mss.shared_clean >> 10,
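The /proc hunks above zero out the layout-revealing fields of /proc/<pid>/maps and smaps when the target mm carries PaX randomization flags and the reader is another task (the PAX_RAND_FLAGS() test). A minimal user-space sketch of that gating pattern follows; mm_info and MF_RANDMMAP are stand-ins for the kernel's mm_struct and MF_PAX_RANDMMAP, not the real code.

#include <stdio.h>

#define MF_RANDMMAP 0x01        /* stand-in for the kernel's MF_PAX_RANDMMAP */

struct mm_info {                /* stand-in for struct mm_struct */
    unsigned long flags;
    int is_self;                /* reader is the owning task */
};

/* Print one map line, zeroing the address fields when the address space
 * is randomized and the reader is a different task. */
static void show_map(const struct mm_info *mm,
                     unsigned long start, unsigned long end,
                     unsigned long long pgoff, const char *perms)
{
    int hide = (mm->flags & MF_RANDMMAP) && !mm->is_self;

    printf("%08lx-%08lx %s %08llx\n",
           hide ? 0UL : start,
           hide ? 0UL : end,
           perms,
           hide ? 0ULL : pgoff);
}

int main(void)
{
    struct mm_info self  = { MF_RANDMMAP, 1 };
    struct mm_info other = { MF_RANDMMAP, 0 };

    show_map(&self,  0x400000, 0x401000, 0, "r-xp");   /* real addresses */
    show_map(&other, 0x400000, 0x401000, 0, "r-xp");   /* zeroed out     */
    return 0;
}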
43472diff -urNp linux-2.6.32.42/fs/proc/task_nommu.c linux-2.6.32.42/fs/proc/task_nommu.c
43473--- linux-2.6.32.42/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
43474+++ linux-2.6.32.42/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
43475@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
43476 else
43477 bytes += kobjsize(mm);
43478
43479- if (current->fs && current->fs->users > 1)
43480+ if (current->fs && atomic_read(&current->fs->users) > 1)
43481 sbytes += kobjsize(current->fs);
43482 else
43483 bytes += kobjsize(current->fs);
43484@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
43485 if (len < 1)
43486 len = 1;
43487 seq_printf(m, "%*c", len, ' ');
43488- seq_path(m, &file->f_path, "");
43489+ seq_path(m, &file->f_path, "\n\\");
43490 }
43491
43492 seq_putc(m, '\n');
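Both task_mmu.c and task_nommu.c above widen seq_path()'s escape set to "\n\\", so a crafted file name containing a newline or backslash cannot inject an extra line into the one-entry-per-line proc output. A rough user-space sketch of the octal-escaping idea (roughly what the kernel's mangle_path() does; this is not the kernel code):

#include <stdio.h>
#include <string.h>

/* Octal-escape any character from 'esc' found in 'name'. */
static void print_escaped(const char *name, const char *esc)
{
    for (const char *p = name; *p; p++) {
        if (strchr(esc, *p))
            printf("\\%03o", (unsigned char)*p);
        else
            putchar(*p);
    }
    putchar('\n');
}

int main(void)
{
    /* A name with an embedded newline would otherwise fake a second
     * entry in a one-line-per-record listing. */
    const char *tricky = "/tmp/evil\nfake-entry";

    print_escaped(tricky, "");       /* unescaped: renders as two lines */
    print_escaped(tricky, "\n\\");   /* escaped: stays on one line      */
    return 0;
}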
43493diff -urNp linux-2.6.32.42/fs/readdir.c linux-2.6.32.42/fs/readdir.c
43494--- linux-2.6.32.42/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
43495+++ linux-2.6.32.42/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
43496@@ -16,6 +16,7 @@
43497 #include <linux/security.h>
43498 #include <linux/syscalls.h>
43499 #include <linux/unistd.h>
43500+#include <linux/namei.h>
43501
43502 #include <asm/uaccess.h>
43503
43504@@ -67,6 +68,7 @@ struct old_linux_dirent {
43505
43506 struct readdir_callback {
43507 struct old_linux_dirent __user * dirent;
43508+ struct file * file;
43509 int result;
43510 };
43511
43512@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43513 buf->result = -EOVERFLOW;
43514 return -EOVERFLOW;
43515 }
43516+
43517+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43518+ return 0;
43519+
43520 buf->result++;
43521 dirent = buf->dirent;
43522 if (!access_ok(VERIFY_WRITE, dirent,
43523@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43524
43525 buf.result = 0;
43526 buf.dirent = dirent;
43527+ buf.file = file;
43528
43529 error = vfs_readdir(file, fillonedir, &buf);
43530 if (buf.result)
43531@@ -142,6 +149,7 @@ struct linux_dirent {
43532 struct getdents_callback {
43533 struct linux_dirent __user * current_dir;
43534 struct linux_dirent __user * previous;
43535+ struct file * file;
43536 int count;
43537 int error;
43538 };
43539@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
43540 buf->error = -EOVERFLOW;
43541 return -EOVERFLOW;
43542 }
43543+
43544+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43545+ return 0;
43546+
43547 dirent = buf->previous;
43548 if (dirent) {
43549 if (__put_user(offset, &dirent->d_off))
43550@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43551 buf.previous = NULL;
43552 buf.count = count;
43553 buf.error = 0;
43554+ buf.file = file;
43555
43556 error = vfs_readdir(file, filldir, &buf);
43557 if (error >= 0)
43558@@ -228,6 +241,7 @@ out:
43559 struct getdents_callback64 {
43560 struct linux_dirent64 __user * current_dir;
43561 struct linux_dirent64 __user * previous;
43562+ struct file *file;
43563 int count;
43564 int error;
43565 };
43566@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
43567 buf->error = -EINVAL; /* only used if we fail.. */
43568 if (reclen > buf->count)
43569 return -EINVAL;
43570+
43571+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43572+ return 0;
43573+
43574 dirent = buf->previous;
43575 if (dirent) {
43576 if (__put_user(offset, &dirent->d_off))
43577@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43578
43579 buf.current_dir = dirent;
43580 buf.previous = NULL;
43581+ buf.file = file;
43582 buf.count = count;
43583 buf.error = 0;
43584
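The readdir.c hunks thread a struct file pointer through each callback context so the added gr_acl_handle_filldir() hook can silently drop entries the RBAC policy hides. A generic sketch of that shape, with a hypothetical allow_entry() predicate standing in for the grsecurity hook:

#include <stdio.h>
#include <string.h>

/* Stand-in for the callback context (struct getdents_callback): the
 * patch adds a back-pointer to the open directory file so the filter
 * can consult it. */
struct fill_ctx {
    const char *dir_path;     /* stands in for buf->file */
    int emitted;
};

/* Hypothetical policy hook, like gr_acl_handle_filldir(): return 0 to
 * hide the entry, nonzero to allow it. */
static int allow_entry(const struct fill_ctx *ctx, const char *name)
{
    (void)ctx;
    return strcmp(name, "hidden.txt") != 0;
}

/* Skipping an entry just returns "success" so the walk continues
 * without copying that entry out. */
static int filldir(struct fill_ctx *ctx, const char *name)
{
    if (!allow_entry(ctx, name))
        return 0;
    printf("%s\n", name);
    ctx->emitted++;
    return 0;
}

int main(void)
{
    struct fill_ctx ctx = { "/srv/data", 0 };
    const char *entries[] = { "a.txt", "hidden.txt", "b.txt" };

    for (unsigned i = 0; i < sizeof entries / sizeof entries[0]; i++)
        filldir(&ctx, entries[i]);
    return 0;
}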
43585diff -urNp linux-2.6.32.42/fs/reiserfs/dir.c linux-2.6.32.42/fs/reiserfs/dir.c
43586--- linux-2.6.32.42/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43587+++ linux-2.6.32.42/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43588@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43589 struct reiserfs_dir_entry de;
43590 int ret = 0;
43591
43592+ pax_track_stack();
43593+
43594 reiserfs_write_lock(inode->i_sb);
43595
43596 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43597diff -urNp linux-2.6.32.42/fs/reiserfs/do_balan.c linux-2.6.32.42/fs/reiserfs/do_balan.c
43598--- linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
43599+++ linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
43600@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
43601 return;
43602 }
43603
43604- atomic_inc(&(fs_generation(tb->tb_sb)));
43605+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43606 do_balance_starts(tb);
43607
43608 /* balance leaf returns 0 except if combining L R and S into
43609diff -urNp linux-2.6.32.42/fs/reiserfs/item_ops.c linux-2.6.32.42/fs/reiserfs/item_ops.c
43610--- linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
43611+++ linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
43612@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
43613 vi->vi_index, vi->vi_type, vi->vi_ih);
43614 }
43615
43616-static struct item_operations stat_data_ops = {
43617+static const struct item_operations stat_data_ops = {
43618 .bytes_number = sd_bytes_number,
43619 .decrement_key = sd_decrement_key,
43620 .is_left_mergeable = sd_is_left_mergeable,
43621@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
43622 vi->vi_index, vi->vi_type, vi->vi_ih);
43623 }
43624
43625-static struct item_operations direct_ops = {
43626+static const struct item_operations direct_ops = {
43627 .bytes_number = direct_bytes_number,
43628 .decrement_key = direct_decrement_key,
43629 .is_left_mergeable = direct_is_left_mergeable,
43630@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
43631 vi->vi_index, vi->vi_type, vi->vi_ih);
43632 }
43633
43634-static struct item_operations indirect_ops = {
43635+static const struct item_operations indirect_ops = {
43636 .bytes_number = indirect_bytes_number,
43637 .decrement_key = indirect_decrement_key,
43638 .is_left_mergeable = indirect_is_left_mergeable,
43639@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
43640 printk("\n");
43641 }
43642
43643-static struct item_operations direntry_ops = {
43644+static const struct item_operations direntry_ops = {
43645 .bytes_number = direntry_bytes_number,
43646 .decrement_key = direntry_decrement_key,
43647 .is_left_mergeable = direntry_is_left_mergeable,
43648@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
43649 "Invalid item type observed, run fsck ASAP");
43650 }
43651
43652-static struct item_operations errcatch_ops = {
43653+static const struct item_operations errcatch_ops = {
43654 errcatch_bytes_number,
43655 errcatch_decrement_key,
43656 errcatch_is_left_mergeable,
43657@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
43658 #error Item types must use disk-format assigned values.
43659 #endif
43660
43661-struct item_operations *item_ops[TYPE_ANY + 1] = {
43662+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
43663 &stat_data_ops,
43664 &indirect_ops,
43665 &direct_ops,
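The item_ops.c hunks mark every item_operations table, and the item_ops[] pointer array itself, const, so these function-pointer tables end up in read-only data instead of writable memory. A tiny standalone illustration of the same constification:

#include <stdio.h>

struct item_operations {
    int (*bytes_number)(int size);
};

static int direct_bytes_number(int size) { return size; }

/* const table of function pointers: placed in .rodata, so it cannot be
 * silently retargeted at run time. */
static const struct item_operations direct_ops = {
    .bytes_number = direct_bytes_number,
};

/* const array of pointers to const tables, as with item_ops[]. */
static const struct item_operations *const item_ops_example[] = {
    &direct_ops,
};

int main(void)
{
    printf("%d\n", item_ops_example[0]->bytes_number(42));
    return 0;
}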
43666diff -urNp linux-2.6.32.42/fs/reiserfs/journal.c linux-2.6.32.42/fs/reiserfs/journal.c
43667--- linux-2.6.32.42/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
43668+++ linux-2.6.32.42/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
43669@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
43670 struct buffer_head *bh;
43671 int i, j;
43672
43673+ pax_track_stack();
43674+
43675 bh = __getblk(dev, block, bufsize);
43676 if (buffer_uptodate(bh))
43677 return (bh);
43678diff -urNp linux-2.6.32.42/fs/reiserfs/namei.c linux-2.6.32.42/fs/reiserfs/namei.c
43679--- linux-2.6.32.42/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43680+++ linux-2.6.32.42/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43681@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43682 unsigned long savelink = 1;
43683 struct timespec ctime;
43684
43685+ pax_track_stack();
43686+
43687 /* three balancings: (1) old name removal, (2) new name insertion
43688 and (3) maybe "save" link insertion
43689 stat data updates: (1) old directory,
43690diff -urNp linux-2.6.32.42/fs/reiserfs/procfs.c linux-2.6.32.42/fs/reiserfs/procfs.c
43691--- linux-2.6.32.42/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43692+++ linux-2.6.32.42/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43693@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43694 "SMALL_TAILS " : "NO_TAILS ",
43695 replay_only(sb) ? "REPLAY_ONLY " : "",
43696 convert_reiserfs(sb) ? "CONV " : "",
43697- atomic_read(&r->s_generation_counter),
43698+ atomic_read_unchecked(&r->s_generation_counter),
43699 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43700 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43701 SF(s_good_search_by_key_reada), SF(s_bmaps),
43702@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43703 struct journal_params *jp = &rs->s_v1.s_journal;
43704 char b[BDEVNAME_SIZE];
43705
43706+ pax_track_stack();
43707+
43708 seq_printf(m, /* on-disk fields */
43709 "jp_journal_1st_block: \t%i\n"
43710 "jp_journal_dev: \t%s[%x]\n"
43711diff -urNp linux-2.6.32.42/fs/reiserfs/stree.c linux-2.6.32.42/fs/reiserfs/stree.c
43712--- linux-2.6.32.42/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43713+++ linux-2.6.32.42/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43714@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43715 int iter = 0;
43716 #endif
43717
43718+ pax_track_stack();
43719+
43720 BUG_ON(!th->t_trans_id);
43721
43722 init_tb_struct(th, &s_del_balance, sb, path,
43723@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43724 int retval;
43725 int quota_cut_bytes = 0;
43726
43727+ pax_track_stack();
43728+
43729 BUG_ON(!th->t_trans_id);
43730
43731 le_key2cpu_key(&cpu_key, key);
43732@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43733 int quota_cut_bytes;
43734 loff_t tail_pos = 0;
43735
43736+ pax_track_stack();
43737+
43738 BUG_ON(!th->t_trans_id);
43739
43740 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43741@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43742 int retval;
43743 int fs_gen;
43744
43745+ pax_track_stack();
43746+
43747 BUG_ON(!th->t_trans_id);
43748
43749 fs_gen = get_generation(inode->i_sb);
43750@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43751 int fs_gen = 0;
43752 int quota_bytes = 0;
43753
43754+ pax_track_stack();
43755+
43756 BUG_ON(!th->t_trans_id);
43757
43758 if (inode) { /* Do we count quotas for item? */
43759diff -urNp linux-2.6.32.42/fs/reiserfs/super.c linux-2.6.32.42/fs/reiserfs/super.c
43760--- linux-2.6.32.42/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43761+++ linux-2.6.32.42/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43762@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43763 {.option_name = NULL}
43764 };
43765
43766+ pax_track_stack();
43767+
43768 *blocks = 0;
43769 if (!options || !*options)
43770 /* use default configuration: create tails, journaling on, no
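Several reiserfs functions with large on-stack structures gain a pax_track_stack() call at entry; this belongs to PaX's kernel stack tracking/clearing feature, whose implementation lives elsewhere in the patch. The sketch below only shows the insertion pattern, with a no-op stand-in for the real helper.

#include <stdio.h>
#include <string.h>

/* Stand-in for PaX's pax_track_stack(); the real helper is provided by
 * the PaX patch, this one does nothing. */
static void pax_track_stack(void)
{
}

struct tree_balance { char scratch[4096]; };   /* big on-stack object */

static int do_balance_like(void)
{
    struct tree_balance tb;          /* large stack frame */

    pax_track_stack();               /* inserted first thing, as in the hunks */

    memset(&tb, 0, sizeof tb);
    return (int)sizeof tb;
}

int main(void)
{
    printf("frame uses at least %d bytes\n", do_balance_like());
    return 0;
}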
43771diff -urNp linux-2.6.32.42/fs/select.c linux-2.6.32.42/fs/select.c
43772--- linux-2.6.32.42/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43773+++ linux-2.6.32.42/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43774@@ -20,6 +20,7 @@
43775 #include <linux/module.h>
43776 #include <linux/slab.h>
43777 #include <linux/poll.h>
43778+#include <linux/security.h>
43779 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43780 #include <linux/file.h>
43781 #include <linux/fdtable.h>
43782@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43783 int retval, i, timed_out = 0;
43784 unsigned long slack = 0;
43785
43786+ pax_track_stack();
43787+
43788 rcu_read_lock();
43789 retval = max_select_fd(n, fds);
43790 rcu_read_unlock();
43791@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43792 /* Allocate small arguments on the stack to save memory and be faster */
43793 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43794
43795+ pax_track_stack();
43796+
43797 ret = -EINVAL;
43798 if (n < 0)
43799 goto out_nofds;
43800@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43801 struct poll_list *walk = head;
43802 unsigned long todo = nfds;
43803
43804+ pax_track_stack();
43805+
43806+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43807 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43808 return -EINVAL;
43809
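do_sys_poll() already rejects nfds values above RLIMIT_NOFILE; the added gr_learn_resource() call just before that check appears to let grsecurity's resource learning/logging record what was requested before the limit is enforced. A user-space sketch of the same "record, then enforce" ordering, with a hypothetical learn_resource() in place of the grsecurity hook:

#include <stdio.h>
#include <errno.h>
#include <sys/resource.h>

/* Hypothetical learning hook standing in for gr_learn_resource(). */
static void learn_resource(const char *name, unsigned long wanted)
{
    fprintf(stderr, "learn: %s wanted=%lu\n", name, wanted);
}

static int check_nfds(unsigned long nfds)
{
    struct rlimit rl;

    if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
        return -errno;

    learn_resource("RLIMIT_NOFILE", nfds);   /* record before enforcing */
    if (nfds > rl.rlim_cur)
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("nfds=4: %d\n", check_nfds(4));
    printf("nfds=1<<20: %d\n", check_nfds(1UL << 20));
    return 0;
}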
43810diff -urNp linux-2.6.32.42/fs/seq_file.c linux-2.6.32.42/fs/seq_file.c
43811--- linux-2.6.32.42/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43812+++ linux-2.6.32.42/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43813@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43814 return 0;
43815 }
43816 if (!m->buf) {
43817- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43818+ m->size = PAGE_SIZE;
43819+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43820 if (!m->buf)
43821 return -ENOMEM;
43822 }
43823@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43824 Eoverflow:
43825 m->op->stop(m, p);
43826 kfree(m->buf);
43827- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43828+ m->size <<= 1;
43829+ m->buf = kmalloc(m->size, GFP_KERNEL);
43830 return !m->buf ? -ENOMEM : -EAGAIN;
43831 }
43832
43833@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43834 m->version = file->f_version;
43835 /* grab buffer if we didn't have one */
43836 if (!m->buf) {
43837- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43838+ m->size = PAGE_SIZE;
43839+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43840 if (!m->buf)
43841 goto Enomem;
43842 }
43843@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43844 goto Fill;
43845 m->op->stop(m, p);
43846 kfree(m->buf);
43847- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43848+ m->size <<= 1;
43849+ m->buf = kmalloc(m->size, GFP_KERNEL);
43850 if (!m->buf)
43851 goto Enomem;
43852 m->count = 0;
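The seq_file.c hunks pull the size update out of the kmalloc() argument list (`m->size = PAGE_SIZE` and `m->size <<= 1` become separate statements), which keeps the bookkeeping visible on its own line and, presumably, is friendlier to allocation-size instrumentation. The surrounding code is the usual grow-by-doubling retry; a standalone sketch of that pattern with malloc() in place of kmalloc():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct seq_buf {
    char *buf;
    size_t size;
};

/* Grow-by-doubling retry, mirroring seq_file's Eoverflow path: free the
 * old buffer, double the recorded size as its own statement, then
 * allocate the new buffer. */
static int seq_buf_grow(struct seq_buf *m)
{
    free(m->buf);
    m->size <<= 1;
    m->buf = malloc(m->size);
    return m->buf ? 0 : -1;
}

int main(void)
{
    struct seq_buf m = { malloc(PAGE_SIZE), PAGE_SIZE };

    if (!m.buf)
        return 1;
    if (seq_buf_grow(&m) == 0)
        printf("buffer grown to %zu bytes\n", m.size);
    free(m.buf);
    return 0;
}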
43853diff -urNp linux-2.6.32.42/fs/smbfs/symlink.c linux-2.6.32.42/fs/smbfs/symlink.c
43854--- linux-2.6.32.42/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43855+++ linux-2.6.32.42/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43856@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43857
43858 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43859 {
43860- char *s = nd_get_link(nd);
43861+ const char *s = nd_get_link(nd);
43862 if (!IS_ERR(s))
43863 __putname(s);
43864 }
43865diff -urNp linux-2.6.32.42/fs/splice.c linux-2.6.32.42/fs/splice.c
43866--- linux-2.6.32.42/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43867+++ linux-2.6.32.42/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43868@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43869 pipe_lock(pipe);
43870
43871 for (;;) {
43872- if (!pipe->readers) {
43873+ if (!atomic_read(&pipe->readers)) {
43874 send_sig(SIGPIPE, current, 0);
43875 if (!ret)
43876 ret = -EPIPE;
43877@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43878 do_wakeup = 0;
43879 }
43880
43881- pipe->waiting_writers++;
43882+ atomic_inc(&pipe->waiting_writers);
43883 pipe_wait(pipe);
43884- pipe->waiting_writers--;
43885+ atomic_dec(&pipe->waiting_writers);
43886 }
43887
43888 pipe_unlock(pipe);
43889@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43890 .spd_release = spd_release_page,
43891 };
43892
43893+ pax_track_stack();
43894+
43895 index = *ppos >> PAGE_CACHE_SHIFT;
43896 loff = *ppos & ~PAGE_CACHE_MASK;
43897 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43898@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43899 old_fs = get_fs();
43900 set_fs(get_ds());
43901 /* The cast to a user pointer is valid due to the set_fs() */
43902- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43903+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43904 set_fs(old_fs);
43905
43906 return res;
43907@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43908 old_fs = get_fs();
43909 set_fs(get_ds());
43910 /* The cast to a user pointer is valid due to the set_fs() */
43911- res = vfs_write(file, (const char __user *)buf, count, &pos);
43912+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43913 set_fs(old_fs);
43914
43915 return res;
43916@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43917 .spd_release = spd_release_page,
43918 };
43919
43920+ pax_track_stack();
43921+
43922 index = *ppos >> PAGE_CACHE_SHIFT;
43923 offset = *ppos & ~PAGE_CACHE_MASK;
43924 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43925@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43926 goto err;
43927
43928 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43929- vec[i].iov_base = (void __user *) page_address(page);
43930+ vec[i].iov_base = (__force void __user *) page_address(page);
43931 vec[i].iov_len = this_len;
43932 pages[i] = page;
43933 spd.nr_pages++;
43934@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43935 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43936 {
43937 while (!pipe->nrbufs) {
43938- if (!pipe->writers)
43939+ if (!atomic_read(&pipe->writers))
43940 return 0;
43941
43942- if (!pipe->waiting_writers && sd->num_spliced)
43943+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43944 return 0;
43945
43946 if (sd->flags & SPLICE_F_NONBLOCK)
43947@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43948 * out of the pipe right after the splice_to_pipe(). So set
43949 * PIPE_READERS appropriately.
43950 */
43951- pipe->readers = 1;
43952+ atomic_set(&pipe->readers, 1);
43953
43954 current->splice_pipe = pipe;
43955 }
43956@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43957 .spd_release = spd_release_page,
43958 };
43959
43960+ pax_track_stack();
43961+
43962 pipe = pipe_info(file->f_path.dentry->d_inode);
43963 if (!pipe)
43964 return -EBADF;
43965@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43966 ret = -ERESTARTSYS;
43967 break;
43968 }
43969- if (!pipe->writers)
43970+ if (!atomic_read(&pipe->writers))
43971 break;
43972- if (!pipe->waiting_writers) {
43973+ if (!atomic_read(&pipe->waiting_writers)) {
43974 if (flags & SPLICE_F_NONBLOCK) {
43975 ret = -EAGAIN;
43976 break;
43977@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43978 pipe_lock(pipe);
43979
43980 while (pipe->nrbufs >= PIPE_BUFFERS) {
43981- if (!pipe->readers) {
43982+ if (!atomic_read(&pipe->readers)) {
43983 send_sig(SIGPIPE, current, 0);
43984 ret = -EPIPE;
43985 break;
43986@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43987 ret = -ERESTARTSYS;
43988 break;
43989 }
43990- pipe->waiting_writers++;
43991+ atomic_inc(&pipe->waiting_writers);
43992 pipe_wait(pipe);
43993- pipe->waiting_writers--;
43994+ atomic_dec(&pipe->waiting_writers);
43995 }
43996
43997 pipe_unlock(pipe);
43998@@ -1785,14 +1791,14 @@ retry:
43999 pipe_double_lock(ipipe, opipe);
44000
44001 do {
44002- if (!opipe->readers) {
44003+ if (!atomic_read(&opipe->readers)) {
44004 send_sig(SIGPIPE, current, 0);
44005 if (!ret)
44006 ret = -EPIPE;
44007 break;
44008 }
44009
44010- if (!ipipe->nrbufs && !ipipe->writers)
44011+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
44012 break;
44013
44014 /*
44015@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
44016 pipe_double_lock(ipipe, opipe);
44017
44018 do {
44019- if (!opipe->readers) {
44020+ if (!atomic_read(&opipe->readers)) {
44021 send_sig(SIGPIPE, current, 0);
44022 if (!ret)
44023 ret = -EPIPE;
44024@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
44025 * return EAGAIN if we have the potential of some data in the
44026 * future, otherwise just return 0
44027 */
44028- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
44029+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
44030 ret = -EAGAIN;
44031
44032 pipe_unlock(ipipe);
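The splice.c hunks convert pipe->readers, pipe->writers and pipe->waiting_writers from plain integers to atomic_t, replacing ++/-- and direct reads with atomic_inc/atomic_dec/atomic_read, the counter-hardening pattern this patch applies throughout. A user-space sketch of the same pattern using C11 atomics (pipe_info is a stand-in, not struct pipe_inode_info):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_info {
    atomic_int readers;
    atomic_int writers;
    atomic_int waiting_writers;
};

static void writer_wait(struct pipe_info *pipe)
{
    atomic_fetch_add(&pipe->waiting_writers, 1);   /* was waiting_writers++ */
    /* ... pipe_wait(pipe) would sleep here ... */
    atomic_fetch_sub(&pipe->waiting_writers, 1);   /* was waiting_writers-- */
}

int main(void)
{
    struct pipe_info pipe = { 1, 1, 0 };

    if (!atomic_load(&pipe.readers))               /* was !pipe->readers */
        puts("no readers: would raise SIGPIPE");

    writer_wait(&pipe);
    printf("readers=%d writers=%d\n",
           atomic_load(&pipe.readers), atomic_load(&pipe.writers));
    return 0;
}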
44033diff -urNp linux-2.6.32.42/fs/sysfs/file.c linux-2.6.32.42/fs/sysfs/file.c
44034--- linux-2.6.32.42/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
44035+++ linux-2.6.32.42/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
44036@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
44037
44038 struct sysfs_open_dirent {
44039 atomic_t refcnt;
44040- atomic_t event;
44041+ atomic_unchecked_t event;
44042 wait_queue_head_t poll;
44043 struct list_head buffers; /* goes through sysfs_buffer.list */
44044 };
44045@@ -53,7 +53,7 @@ struct sysfs_buffer {
44046 size_t count;
44047 loff_t pos;
44048 char * page;
44049- struct sysfs_ops * ops;
44050+ const struct sysfs_ops * ops;
44051 struct mutex mutex;
44052 int needs_read_fill;
44053 int event;
44054@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
44055 {
44056 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44057 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44058- struct sysfs_ops * ops = buffer->ops;
44059+ const struct sysfs_ops * ops = buffer->ops;
44060 int ret = 0;
44061 ssize_t count;
44062
44063@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
44064 if (!sysfs_get_active_two(attr_sd))
44065 return -ENODEV;
44066
44067- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
44068+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
44069 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
44070
44071 sysfs_put_active_two(attr_sd);
44072@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
44073 {
44074 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44075 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44076- struct sysfs_ops * ops = buffer->ops;
44077+ const struct sysfs_ops * ops = buffer->ops;
44078 int rc;
44079
44080 /* need attr_sd for attr and ops, its parent for kobj */
44081@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
44082 return -ENOMEM;
44083
44084 atomic_set(&new_od->refcnt, 0);
44085- atomic_set(&new_od->event, 1);
44086+ atomic_set_unchecked(&new_od->event, 1);
44087 init_waitqueue_head(&new_od->poll);
44088 INIT_LIST_HEAD(&new_od->buffers);
44089 goto retry;
44090@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
44091 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
44092 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44093 struct sysfs_buffer *buffer;
44094- struct sysfs_ops *ops;
44095+ const struct sysfs_ops *ops;
44096 int error = -EACCES;
44097 char *p;
44098
44099@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
44100
44101 sysfs_put_active_two(attr_sd);
44102
44103- if (buffer->event != atomic_read(&od->event))
44104+ if (buffer->event != atomic_read_unchecked(&od->event))
44105 goto trigger;
44106
44107 return DEFAULT_POLLMASK;
44108@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
44109
44110 od = sd->s_attr.open;
44111 if (od) {
44112- atomic_inc(&od->event);
44113+ atomic_inc_unchecked(&od->event);
44114 wake_up_interruptible(&od->poll);
44115 }
44116
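In sysfs, the open-dirent event counter becomes atomic_unchecked_t, an atomic flavour the PaX/grsecurity patch exempts from its reference-count overflow checking, since this is a pure event sequence where wrapping is harmless. Pollers keep a snapshot and compare it against the live counter. A small sketch of that snapshot/compare pattern (stand-in types, C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

struct open_dirent { atomic_uint event; };   /* stands in for sysfs_open_dirent */
struct buffer      { unsigned int event; };  /* per-open snapshot */

static void notify(struct open_dirent *od)
{
    atomic_fetch_add(&od->event, 1);          /* like sysfs_notify_dirent() */
}

static int needs_refresh(const struct buffer *b, struct open_dirent *od)
{
    return b->event != atomic_load(&od->event);   /* like the sysfs_poll() test */
}

int main(void)
{
    struct open_dirent od = { 1 };
    struct buffer b = { .event = atomic_load(&od.event) };

    printf("dirty before notify: %d\n", needs_refresh(&b, &od));
    notify(&od);
    printf("dirty after notify:  %d\n", needs_refresh(&b, &od));
    return 0;
}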
44117diff -urNp linux-2.6.32.42/fs/sysfs/mount.c linux-2.6.32.42/fs/sysfs/mount.c
44118--- linux-2.6.32.42/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
44119+++ linux-2.6.32.42/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
44120@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
44121 .s_name = "",
44122 .s_count = ATOMIC_INIT(1),
44123 .s_flags = SYSFS_DIR,
44124+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44125+ .s_mode = S_IFDIR | S_IRWXU,
44126+#else
44127 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44128+#endif
44129 .s_ino = 1,
44130 };
44131
44132diff -urNp linux-2.6.32.42/fs/sysfs/symlink.c linux-2.6.32.42/fs/sysfs/symlink.c
44133--- linux-2.6.32.42/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
44134+++ linux-2.6.32.42/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
44135@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
44136
44137 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44138 {
44139- char *page = nd_get_link(nd);
44140+ const char *page = nd_get_link(nd);
44141 if (!IS_ERR(page))
44142 free_page((unsigned long)page);
44143 }
44144diff -urNp linux-2.6.32.42/fs/udf/balloc.c linux-2.6.32.42/fs/udf/balloc.c
44145--- linux-2.6.32.42/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
44146+++ linux-2.6.32.42/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
44147@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
44148
44149 mutex_lock(&sbi->s_alloc_mutex);
44150 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44151- if (bloc->logicalBlockNum < 0 ||
44152- (bloc->logicalBlockNum + count) >
44153- partmap->s_partition_len) {
44154+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44155 udf_debug("%d < %d || %d + %d > %d\n",
44156 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
44157 count, partmap->s_partition_len);
44158@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
44159
44160 mutex_lock(&sbi->s_alloc_mutex);
44161 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44162- if (bloc->logicalBlockNum < 0 ||
44163- (bloc->logicalBlockNum + count) >
44164- partmap->s_partition_len) {
44165+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44166 udf_debug("%d < %d || %d + %d > %d\n",
44167 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
44168 partmap->s_partition_len);
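The udf/balloc.c hunks drop the `bloc->logicalBlockNum < 0` half of the condition: logicalBlockNum is an unsigned on-disk field, so that comparison can never be true (gcc's -Wtype-limits flags it as always-false), and only the upper-bound check does any work. A small illustration of the check that remains:

#include <stdio.h>
#include <stdint.h>

/* Only the upper bound is meaningful for an unsigned block number.
 * (Widened to 64 bits here purely so this sketch itself cannot wrap.) */
static int free_blocks_out_of_range(uint32_t block, uint32_t count,
                                    uint32_t partition_len)
{
    return (uint64_t)block + count > partition_len;
}

int main(void)
{
    printf("bad=%d\n", free_blocks_out_of_range(10, 5, 100));   /* 0: in range */
    printf("bad=%d\n", free_blocks_out_of_range(90, 20, 100));  /* 1: past end */
    return 0;
}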
44169diff -urNp linux-2.6.32.42/fs/udf/inode.c linux-2.6.32.42/fs/udf/inode.c
44170--- linux-2.6.32.42/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
44171+++ linux-2.6.32.42/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
44172@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
44173 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
44174 int lastblock = 0;
44175
44176+ pax_track_stack();
44177+
44178 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44179 prev_epos.block = iinfo->i_location;
44180 prev_epos.bh = NULL;
44181diff -urNp linux-2.6.32.42/fs/udf/misc.c linux-2.6.32.42/fs/udf/misc.c
44182--- linux-2.6.32.42/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
44183+++ linux-2.6.32.42/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
44184@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44185
44186 u8 udf_tag_checksum(const struct tag *t)
44187 {
44188- u8 *data = (u8 *)t;
44189+ const u8 *data = (const u8 *)t;
44190 u8 checksum = 0;
44191 int i;
44192 for (i = 0; i < sizeof(struct tag); ++i)
44193diff -urNp linux-2.6.32.42/fs/utimes.c linux-2.6.32.42/fs/utimes.c
44194--- linux-2.6.32.42/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
44195+++ linux-2.6.32.42/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
44196@@ -1,6 +1,7 @@
44197 #include <linux/compiler.h>
44198 #include <linux/file.h>
44199 #include <linux/fs.h>
44200+#include <linux/security.h>
44201 #include <linux/linkage.h>
44202 #include <linux/mount.h>
44203 #include <linux/namei.h>
44204@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44205 goto mnt_drop_write_and_out;
44206 }
44207 }
44208+
44209+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44210+ error = -EACCES;
44211+ goto mnt_drop_write_and_out;
44212+ }
44213+
44214 mutex_lock(&inode->i_mutex);
44215 error = notify_change(path->dentry, &newattrs);
44216 mutex_unlock(&inode->i_mutex);
44217diff -urNp linux-2.6.32.42/fs/xattr_acl.c linux-2.6.32.42/fs/xattr_acl.c
44218--- linux-2.6.32.42/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
44219+++ linux-2.6.32.42/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
44220@@ -17,8 +17,8 @@
44221 struct posix_acl *
44222 posix_acl_from_xattr(const void *value, size_t size)
44223 {
44224- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44225- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44226+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44227+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44228 int count;
44229 struct posix_acl *acl;
44230 struct posix_acl_entry *acl_e;
44231diff -urNp linux-2.6.32.42/fs/xattr.c linux-2.6.32.42/fs/xattr.c
44232--- linux-2.6.32.42/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
44233+++ linux-2.6.32.42/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
44234@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44235 * Extended attribute SET operations
44236 */
44237 static long
44238-setxattr(struct dentry *d, const char __user *name, const void __user *value,
44239+setxattr(struct path *path, const char __user *name, const void __user *value,
44240 size_t size, int flags)
44241 {
44242 int error;
44243@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
44244 return PTR_ERR(kvalue);
44245 }
44246
44247- error = vfs_setxattr(d, kname, kvalue, size, flags);
44248+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44249+ error = -EACCES;
44250+ goto out;
44251+ }
44252+
44253+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44254+out:
44255 kfree(kvalue);
44256 return error;
44257 }
44258@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44259 return error;
44260 error = mnt_want_write(path.mnt);
44261 if (!error) {
44262- error = setxattr(path.dentry, name, value, size, flags);
44263+ error = setxattr(&path, name, value, size, flags);
44264 mnt_drop_write(path.mnt);
44265 }
44266 path_put(&path);
44267@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44268 return error;
44269 error = mnt_want_write(path.mnt);
44270 if (!error) {
44271- error = setxattr(path.dentry, name, value, size, flags);
44272+ error = setxattr(&path, name, value, size, flags);
44273 mnt_drop_write(path.mnt);
44274 }
44275 path_put(&path);
44276@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44277 const void __user *,value, size_t, size, int, flags)
44278 {
44279 struct file *f;
44280- struct dentry *dentry;
44281 int error = -EBADF;
44282
44283 f = fget(fd);
44284 if (!f)
44285 return error;
44286- dentry = f->f_path.dentry;
44287- audit_inode(NULL, dentry);
44288+ audit_inode(NULL, f->f_path.dentry);
44289 error = mnt_want_write_file(f);
44290 if (!error) {
44291- error = setxattr(dentry, name, value, size, flags);
44292+ error = setxattr(&f->f_path, name, value, size, flags);
44293 mnt_drop_write(f->f_path.mnt);
44294 }
44295 fput(f);
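The xattr.c hunks change setxattr() to take a struct path rather than a bare dentry, so the added gr_acl_handle_setxattr() hook can see both the dentry and the vfsmount it was reached through before vfs_setxattr() runs. A sketch of that "bundle dentry+mount, consult the hook, then act" shape; all types and the acl_allows_setxattr() predicate below are hypothetical user-space stand-ins:

#include <stdio.h>
#include <errno.h>

struct dentry   { const char *name; };
struct vfsmount { int deny_xattr_policy; };
struct path     { struct dentry *dentry; struct vfsmount *mnt; };

/* Hypothetical policy hook: needs the mount as well as the dentry,
 * which is why the patch passes a path. */
static int acl_allows_setxattr(const struct path *p)
{
    return !p->mnt->deny_xattr_policy;
}

static long do_setxattr(const struct path *p, const char *name)
{
    if (!acl_allows_setxattr(p))
        return -EACCES;                 /* deny before touching the fs */
    printf("set %s on %s\n", name, p->dentry->name);
    return 0;
}

int main(void)
{
    struct dentry d = { "file.txt" };
    struct vfsmount rw = { 0 }, locked = { 1 };
    struct path ok = { &d, &rw }, denied = { &d, &locked };

    printf("%ld\n", do_setxattr(&ok, "user.comment"));
    printf("%ld\n", do_setxattr(&denied, "user.comment"));
    return 0;
}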
44296diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c
44297--- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
44298+++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
44299@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
44300 xfs_fsop_geom_t fsgeo;
44301 int error;
44302
44303+ memset(&fsgeo, 0, sizeof(fsgeo));
44304 error = xfs_fs_geometry(mp, &fsgeo, 3);
44305 if (error)
44306 return -error;
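xfs_compat_ioc_fsgeometry_v1() copies a stack xfs_fsop_geom_t back to user space, but the v1/v3 call only fills the older fields; the added memset() keeps uninitialized stack bytes (padding and newer fields) from leaking. A user-space sketch of the zero-before-copy pattern, with memcpy standing in for copy_to_user() and a made-up geom_v3 struct:

#include <stdio.h>
#include <string.h>

struct geom_v3 {                             /* stands in for xfs_fsop_geom_t */
    unsigned int blocksize;
    unsigned int version;
    char         padding_and_newer_fields[24];  /* not filled for v3 calls */
};

/* Fills only the fields a v3 caller asked for. */
static void fill_geometry_v3(struct geom_v3 *g)
{
    g->blocksize = 4096;
    g->version   = 3;
}

int main(void)
{
    struct geom_v3 g;
    unsigned char out[sizeof g];

    memset(&g, 0, sizeof g);     /* the added line: no stale stack bytes */
    fill_geometry_v3(&g);

    memcpy(out, &g, sizeof g);   /* stands in for copy_to_user() */
    printf("last byte copied out: %u\n", out[sizeof g - 1]);
    return 0;
}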
44307diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c
44308--- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
44309+++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
44310@@ -134,7 +134,7 @@ xfs_find_handle(
44311 }
44312
44313 error = -EFAULT;
44314- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44315+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44316 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44317 goto out_put;
44318
44319@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
44320 if (IS_ERR(dentry))
44321 return PTR_ERR(dentry);
44322
44323- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
44324+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
44325 if (!kbuf)
44326 goto out_dput;
44327
44328@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
44329 xfs_mount_t *mp,
44330 void __user *arg)
44331 {
44332- xfs_fsop_geom_t fsgeo;
44333+ xfs_fsop_geom_t fsgeo;
44334 int error;
44335
44336 error = xfs_fs_geometry(mp, &fsgeo, 3);
44337diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c
44338--- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
44339+++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
44340@@ -468,7 +468,7 @@ xfs_vn_put_link(
44341 struct nameidata *nd,
44342 void *p)
44343 {
44344- char *s = nd_get_link(nd);
44345+ const char *s = nd_get_link(nd);
44346
44347 if (!IS_ERR(s))
44348 kfree(s);
44349diff -urNp linux-2.6.32.42/fs/xfs/xfs_bmap.c linux-2.6.32.42/fs/xfs/xfs_bmap.c
44350--- linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
44351+++ linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
44352@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
44353 int nmap,
44354 int ret_nmap);
44355 #else
44356-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44357+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44358 #endif /* DEBUG */
44359
44360 #if defined(XFS_RW_TRACE)
44361diff -urNp linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c
44362--- linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
44363+++ linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
44364@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
44365 }
44366
44367 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44368- if (filldir(dirent, sfep->name, sfep->namelen,
44369+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44370+ char name[sfep->namelen];
44371+ memcpy(name, sfep->name, sfep->namelen);
44372+ if (filldir(dirent, name, sfep->namelen,
44373+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
44374+ *offset = off & 0x7fffffff;
44375+ return 0;
44376+ }
44377+ } else if (filldir(dirent, sfep->name, sfep->namelen,
44378 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44379 *offset = off & 0x7fffffff;
44380 return 0;
44381diff -urNp linux-2.6.32.42/grsecurity/gracl_alloc.c linux-2.6.32.42/grsecurity/gracl_alloc.c
44382--- linux-2.6.32.42/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44383+++ linux-2.6.32.42/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
44384@@ -0,0 +1,105 @@
44385+#include <linux/kernel.h>
44386+#include <linux/mm.h>
44387+#include <linux/slab.h>
44388+#include <linux/vmalloc.h>
44389+#include <linux/gracl.h>
44390+#include <linux/grsecurity.h>
44391+
44392+static unsigned long alloc_stack_next = 1;
44393+static unsigned long alloc_stack_size = 1;
44394+static void **alloc_stack;
44395+
44396+static __inline__ int
44397+alloc_pop(void)
44398+{
44399+ if (alloc_stack_next == 1)
44400+ return 0;
44401+
44402+ kfree(alloc_stack[alloc_stack_next - 2]);
44403+
44404+ alloc_stack_next--;
44405+
44406+ return 1;
44407+}
44408+
44409+static __inline__ int
44410+alloc_push(void *buf)
44411+{
44412+ if (alloc_stack_next >= alloc_stack_size)
44413+ return 1;
44414+
44415+ alloc_stack[alloc_stack_next - 1] = buf;
44416+
44417+ alloc_stack_next++;
44418+
44419+ return 0;
44420+}
44421+
44422+void *
44423+acl_alloc(unsigned long len)
44424+{
44425+ void *ret = NULL;
44426+
44427+ if (!len || len > PAGE_SIZE)
44428+ goto out;
44429+
44430+ ret = kmalloc(len, GFP_KERNEL);
44431+
44432+ if (ret) {
44433+ if (alloc_push(ret)) {
44434+ kfree(ret);
44435+ ret = NULL;
44436+ }
44437+ }
44438+
44439+out:
44440+ return ret;
44441+}
44442+
44443+void *
44444+acl_alloc_num(unsigned long num, unsigned long len)
44445+{
44446+ if (!len || (num > (PAGE_SIZE / len)))
44447+ return NULL;
44448+
44449+ return acl_alloc(num * len);
44450+}
44451+
44452+void
44453+acl_free_all(void)
44454+{
44455+ if (gr_acl_is_enabled() || !alloc_stack)
44456+ return;
44457+
44458+ while (alloc_pop()) ;
44459+
44460+ if (alloc_stack) {
44461+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44462+ kfree(alloc_stack);
44463+ else
44464+ vfree(alloc_stack);
44465+ }
44466+
44467+ alloc_stack = NULL;
44468+ alloc_stack_size = 1;
44469+ alloc_stack_next = 1;
44470+
44471+ return;
44472+}
44473+
44474+int
44475+acl_alloc_stack_init(unsigned long size)
44476+{
44477+ if ((size * sizeof (void *)) <= PAGE_SIZE)
44478+ alloc_stack =
44479+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44480+ else
44481+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
44482+
44483+ alloc_stack_size = size;
44484+
44485+ if (!alloc_stack)
44486+ return 0;
44487+ else
44488+ return 1;
44489+}
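gracl_alloc.c is a small helper allocator used while an RBAC policy is being loaded: every acl_alloc() result is pushed onto a pointer stack so acl_free_all() can release the whole policy in one sweep. A self-contained user-space version of the same idea (malloc/free in place of kmalloc/vmalloc, names chosen for the sketch):

#include <stdio.h>
#include <stdlib.h>

/* Pointer stack recording every allocation so everything can be
 * released in one pass. */
static void **alloc_stack;
static size_t alloc_stack_size;
static size_t alloc_stack_next;

static int tracked_stack_init(size_t size)
{
    alloc_stack = calloc(size, sizeof(void *));
    alloc_stack_size = size;
    alloc_stack_next = 0;
    return alloc_stack != NULL;
}

static void *tracked_alloc(size_t len)
{
    if (alloc_stack_next >= alloc_stack_size)
        return NULL;                 /* stack full: refuse, as acl_alloc() does */
    void *p = malloc(len);
    if (p)
        alloc_stack[alloc_stack_next++] = p;
    return p;
}

static void tracked_free_all(void)
{
    while (alloc_stack_next > 0)
        free(alloc_stack[--alloc_stack_next]);
    free(alloc_stack);
    alloc_stack = NULL;
    alloc_stack_size = 0;
}

int main(void)
{
    if (!tracked_stack_init(8))
        return 1;
    tracked_alloc(32);
    tracked_alloc(64);
    tracked_free_all();              /* frees both in one sweep */
    puts("all tracked allocations released");
    return 0;
}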
44490diff -urNp linux-2.6.32.42/grsecurity/gracl.c linux-2.6.32.42/grsecurity/gracl.c
44491--- linux-2.6.32.42/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44492+++ linux-2.6.32.42/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
44493@@ -0,0 +1,4085 @@
44494+#include <linux/kernel.h>
44495+#include <linux/module.h>
44496+#include <linux/sched.h>
44497+#include <linux/mm.h>
44498+#include <linux/file.h>
44499+#include <linux/fs.h>
44500+#include <linux/namei.h>
44501+#include <linux/mount.h>
44502+#include <linux/tty.h>
44503+#include <linux/proc_fs.h>
44504+#include <linux/smp_lock.h>
44505+#include <linux/slab.h>
44506+#include <linux/vmalloc.h>
44507+#include <linux/types.h>
44508+#include <linux/sysctl.h>
44509+#include <linux/netdevice.h>
44510+#include <linux/ptrace.h>
44511+#include <linux/gracl.h>
44512+#include <linux/gralloc.h>
44513+#include <linux/grsecurity.h>
44514+#include <linux/grinternal.h>
44515+#include <linux/pid_namespace.h>
44516+#include <linux/fdtable.h>
44517+#include <linux/percpu.h>
44518+
44519+#include <asm/uaccess.h>
44520+#include <asm/errno.h>
44521+#include <asm/mman.h>
44522+
44523+static struct acl_role_db acl_role_set;
44524+static struct name_db name_set;
44525+static struct inodev_db inodev_set;
44526+
44527+/* for keeping track of userspace pointers used for subjects, so we
44528+ can share references in the kernel as well
44529+*/
44530+
44531+static struct dentry *real_root;
44532+static struct vfsmount *real_root_mnt;
44533+
44534+static struct acl_subj_map_db subj_map_set;
44535+
44536+static struct acl_role_label *default_role;
44537+
44538+static struct acl_role_label *role_list;
44539+
44540+static u16 acl_sp_role_value;
44541+
44542+extern char *gr_shared_page[4];
44543+static DEFINE_MUTEX(gr_dev_mutex);
44544+DEFINE_RWLOCK(gr_inode_lock);
44545+
44546+struct gr_arg *gr_usermode;
44547+
44548+static unsigned int gr_status __read_only = GR_STATUS_INIT;
44549+
44550+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44551+extern void gr_clear_learn_entries(void);
44552+
44553+#ifdef CONFIG_GRKERNSEC_RESLOG
44554+extern void gr_log_resource(const struct task_struct *task,
44555+ const int res, const unsigned long wanted, const int gt);
44556+#endif
44557+
44558+unsigned char *gr_system_salt;
44559+unsigned char *gr_system_sum;
44560+
44561+static struct sprole_pw **acl_special_roles = NULL;
44562+static __u16 num_sprole_pws = 0;
44563+
44564+static struct acl_role_label *kernel_role = NULL;
44565+
44566+static unsigned int gr_auth_attempts = 0;
44567+static unsigned long gr_auth_expires = 0UL;
44568+
44569+#ifdef CONFIG_NET
44570+extern struct vfsmount *sock_mnt;
44571+#endif
44572+extern struct vfsmount *pipe_mnt;
44573+extern struct vfsmount *shm_mnt;
44574+#ifdef CONFIG_HUGETLBFS
44575+extern struct vfsmount *hugetlbfs_vfsmount;
44576+#endif
44577+
44578+static struct acl_object_label *fakefs_obj_rw;
44579+static struct acl_object_label *fakefs_obj_rwx;
44580+
44581+extern int gr_init_uidset(void);
44582+extern void gr_free_uidset(void);
44583+extern void gr_remove_uid(uid_t uid);
44584+extern int gr_find_uid(uid_t uid);
44585+
44586+__inline__ int
44587+gr_acl_is_enabled(void)
44588+{
44589+ return (gr_status & GR_READY);
44590+}
44591+
44592+#ifdef CONFIG_BTRFS_FS
44593+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44594+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44595+#endif
44596+
44597+static inline dev_t __get_dev(const struct dentry *dentry)
44598+{
44599+#ifdef CONFIG_BTRFS_FS
44600+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44601+ return get_btrfs_dev_from_inode(dentry->d_inode);
44602+ else
44603+#endif
44604+ return dentry->d_inode->i_sb->s_dev;
44605+}
44606+
44607+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44608+{
44609+ return __get_dev(dentry);
44610+}
44611+
44612+static char gr_task_roletype_to_char(struct task_struct *task)
44613+{
44614+ switch (task->role->roletype &
44615+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44616+ GR_ROLE_SPECIAL)) {
44617+ case GR_ROLE_DEFAULT:
44618+ return 'D';
44619+ case GR_ROLE_USER:
44620+ return 'U';
44621+ case GR_ROLE_GROUP:
44622+ return 'G';
44623+ case GR_ROLE_SPECIAL:
44624+ return 'S';
44625+ }
44626+
44627+ return 'X';
44628+}
44629+
44630+char gr_roletype_to_char(void)
44631+{
44632+ return gr_task_roletype_to_char(current);
44633+}
44634+
44635+__inline__ int
44636+gr_acl_tpe_check(void)
44637+{
44638+ if (unlikely(!(gr_status & GR_READY)))
44639+ return 0;
44640+ if (current->role->roletype & GR_ROLE_TPE)
44641+ return 1;
44642+ else
44643+ return 0;
44644+}
44645+
44646+int
44647+gr_handle_rawio(const struct inode *inode)
44648+{
44649+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44650+ if (inode && S_ISBLK(inode->i_mode) &&
44651+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44652+ !capable(CAP_SYS_RAWIO))
44653+ return 1;
44654+#endif
44655+ return 0;
44656+}
44657+
44658+static int
44659+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44660+{
44661+ if (likely(lena != lenb))
44662+ return 0;
44663+
44664+ return !memcmp(a, b, lena);
44665+}
44666+
44667+/* this must be called with vfsmount_lock and dcache_lock held */
44668+
44669+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44670+ struct dentry *root, struct vfsmount *rootmnt,
44671+ char *buffer, int buflen)
44672+{
44673+ char * end = buffer+buflen;
44674+ char * retval;
44675+ int namelen;
44676+
44677+ *--end = '\0';
44678+ buflen--;
44679+
44680+ if (buflen < 1)
44681+ goto Elong;
44682+ /* Get '/' right */
44683+ retval = end-1;
44684+ *retval = '/';
44685+
44686+ for (;;) {
44687+ struct dentry * parent;
44688+
44689+ if (dentry == root && vfsmnt == rootmnt)
44690+ break;
44691+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44692+ /* Global root? */
44693+ if (vfsmnt->mnt_parent == vfsmnt)
44694+ goto global_root;
44695+ dentry = vfsmnt->mnt_mountpoint;
44696+ vfsmnt = vfsmnt->mnt_parent;
44697+ continue;
44698+ }
44699+ parent = dentry->d_parent;
44700+ prefetch(parent);
44701+ namelen = dentry->d_name.len;
44702+ buflen -= namelen + 1;
44703+ if (buflen < 0)
44704+ goto Elong;
44705+ end -= namelen;
44706+ memcpy(end, dentry->d_name.name, namelen);
44707+ *--end = '/';
44708+ retval = end;
44709+ dentry = parent;
44710+ }
44711+
44712+out:
44713+ return retval;
44714+
44715+global_root:
44716+ namelen = dentry->d_name.len;
44717+ buflen -= namelen;
44718+ if (buflen < 0)
44719+ goto Elong;
44720+ retval -= namelen-1; /* hit the slash */
44721+ memcpy(retval, dentry->d_name.name, namelen);
44722+ goto out;
44723+Elong:
44724+ retval = ERR_PTR(-ENAMETOOLONG);
44725+ goto out;
44726+}
44727+
44728+static char *
44729+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44730+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44731+{
44732+ char *retval;
44733+
44734+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44735+ if (unlikely(IS_ERR(retval)))
44736+ retval = strcpy(buf, "<path too long>");
44737+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44738+ retval[1] = '\0';
44739+
44740+ return retval;
44741+}
44742+
44743+static char *
44744+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44745+ char *buf, int buflen)
44746+{
44747+ char *res;
44748+
44749+ /* we can use real_root, real_root_mnt, because this is only called
44750+ by the RBAC system */
44751+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44752+
44753+ return res;
44754+}
44755+
44756+static char *
44757+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44758+ char *buf, int buflen)
44759+{
44760+ char *res;
44761+ struct dentry *root;
44762+ struct vfsmount *rootmnt;
44763+ struct task_struct *reaper = &init_task;
44764+
44765+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44766+ read_lock(&reaper->fs->lock);
44767+ root = dget(reaper->fs->root.dentry);
44768+ rootmnt = mntget(reaper->fs->root.mnt);
44769+ read_unlock(&reaper->fs->lock);
44770+
44771+ spin_lock(&dcache_lock);
44772+ spin_lock(&vfsmount_lock);
44773+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44774+ spin_unlock(&vfsmount_lock);
44775+ spin_unlock(&dcache_lock);
44776+
44777+ dput(root);
44778+ mntput(rootmnt);
44779+ return res;
44780+}
44781+
44782+static char *
44783+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44784+{
44785+ char *ret;
44786+ spin_lock(&dcache_lock);
44787+ spin_lock(&vfsmount_lock);
44788+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44789+ PAGE_SIZE);
44790+ spin_unlock(&vfsmount_lock);
44791+ spin_unlock(&dcache_lock);
44792+ return ret;
44793+}
44794+
44795+char *
44796+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44797+{
44798+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44799+ PAGE_SIZE);
44800+}
44801+
44802+char *
44803+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44804+{
44805+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44806+ PAGE_SIZE);
44807+}
44808+
44809+char *
44810+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44811+{
44812+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44813+ PAGE_SIZE);
44814+}
44815+
44816+char *
44817+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44818+{
44819+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44820+ PAGE_SIZE);
44821+}
44822+
44823+char *
44824+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44825+{
44826+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44827+ PAGE_SIZE);
44828+}
44829+
44830+__inline__ __u32
44831+to_gr_audit(const __u32 reqmode)
44832+{
44833+ /* masks off auditable permission flags, then shifts them to create
44834+ auditing flags, and adds the special case of append auditing if
44835+ we're requesting write */
44836+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44837+}
44838+
44839+struct acl_subject_label *
44840+lookup_subject_map(const struct acl_subject_label *userp)
44841+{
44842+ unsigned int index = shash(userp, subj_map_set.s_size);
44843+ struct subject_map *match;
44844+
44845+ match = subj_map_set.s_hash[index];
44846+
44847+ while (match && match->user != userp)
44848+ match = match->next;
44849+
44850+ if (match != NULL)
44851+ return match->kernel;
44852+ else
44853+ return NULL;
44854+}
44855+
44856+static void
44857+insert_subj_map_entry(struct subject_map *subjmap)
44858+{
44859+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44860+ struct subject_map **curr;
44861+
44862+ subjmap->prev = NULL;
44863+
44864+ curr = &subj_map_set.s_hash[index];
44865+ if (*curr != NULL)
44866+ (*curr)->prev = subjmap;
44867+
44868+ subjmap->next = *curr;
44869+ *curr = subjmap;
44870+
44871+ return;
44872+}
44873+
44874+static struct acl_role_label *
44875+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44876+ const gid_t gid)
44877+{
44878+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44879+ struct acl_role_label *match;
44880+ struct role_allowed_ip *ipp;
44881+ unsigned int x;
44882+ u32 curr_ip = task->signal->curr_ip;
44883+
44884+ task->signal->saved_ip = curr_ip;
44885+
44886+ match = acl_role_set.r_hash[index];
44887+
44888+ while (match) {
44889+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44890+ for (x = 0; x < match->domain_child_num; x++) {
44891+ if (match->domain_children[x] == uid)
44892+ goto found;
44893+ }
44894+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44895+ break;
44896+ match = match->next;
44897+ }
44898+found:
44899+ if (match == NULL) {
44900+ try_group:
44901+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44902+ match = acl_role_set.r_hash[index];
44903+
44904+ while (match) {
44905+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44906+ for (x = 0; x < match->domain_child_num; x++) {
44907+ if (match->domain_children[x] == gid)
44908+ goto found2;
44909+ }
44910+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44911+ break;
44912+ match = match->next;
44913+ }
44914+found2:
44915+ if (match == NULL)
44916+ match = default_role;
44917+ if (match->allowed_ips == NULL)
44918+ return match;
44919+ else {
44920+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44921+ if (likely
44922+ ((ntohl(curr_ip) & ipp->netmask) ==
44923+ (ntohl(ipp->addr) & ipp->netmask)))
44924+ return match;
44925+ }
44926+ match = default_role;
44927+ }
44928+ } else if (match->allowed_ips == NULL) {
44929+ return match;
44930+ } else {
44931+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44932+ if (likely
44933+ ((ntohl(curr_ip) & ipp->netmask) ==
44934+ (ntohl(ipp->addr) & ipp->netmask)))
44935+ return match;
44936+ }
44937+ goto try_group;
44938+ }
44939+
44940+ return match;
44941+}
44942+
44943+struct acl_subject_label *
44944+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44945+ const struct acl_role_label *role)
44946+{
44947+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44948+ struct acl_subject_label *match;
44949+
44950+ match = role->subj_hash[index];
44951+
44952+ while (match && (match->inode != ino || match->device != dev ||
44953+ (match->mode & GR_DELETED))) {
44954+ match = match->next;
44955+ }
44956+
44957+ if (match && !(match->mode & GR_DELETED))
44958+ return match;
44959+ else
44960+ return NULL;
44961+}
44962+
44963+struct acl_subject_label *
44964+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44965+ const struct acl_role_label *role)
44966+{
44967+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44968+ struct acl_subject_label *match;
44969+
44970+ match = role->subj_hash[index];
44971+
44972+ while (match && (match->inode != ino || match->device != dev ||
44973+ !(match->mode & GR_DELETED))) {
44974+ match = match->next;
44975+ }
44976+
44977+ if (match && (match->mode & GR_DELETED))
44978+ return match;
44979+ else
44980+ return NULL;
44981+}
44982+
44983+static struct acl_object_label *
44984+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44985+ const struct acl_subject_label *subj)
44986+{
44987+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44988+ struct acl_object_label *match;
44989+
44990+ match = subj->obj_hash[index];
44991+
44992+ while (match && (match->inode != ino || match->device != dev ||
44993+ (match->mode & GR_DELETED))) {
44994+ match = match->next;
44995+ }
44996+
44997+ if (match && !(match->mode & GR_DELETED))
44998+ return match;
44999+ else
45000+ return NULL;
45001+}
45002+
45003+static struct acl_object_label *
45004+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45005+ const struct acl_subject_label *subj)
45006+{
45007+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45008+ struct acl_object_label *match;
45009+
45010+ match = subj->obj_hash[index];
45011+
45012+ while (match && (match->inode != ino || match->device != dev ||
45013+ !(match->mode & GR_DELETED))) {
45014+ match = match->next;
45015+ }
45016+
45017+ if (match && (match->mode & GR_DELETED))
45018+ return match;
45019+
45020+ match = subj->obj_hash[index];
45021+
45022+ while (match && (match->inode != ino || match->device != dev ||
45023+ (match->mode & GR_DELETED))) {
45024+ match = match->next;
45025+ }
45026+
45027+ if (match && !(match->mode & GR_DELETED))
45028+ return match;
45029+ else
45030+ return NULL;
45031+}
45032+
45033+static struct name_entry *
45034+lookup_name_entry(const char *name)
45035+{
45036+ unsigned int len = strlen(name);
45037+ unsigned int key = full_name_hash(name, len);
45038+ unsigned int index = key % name_set.n_size;
45039+ struct name_entry *match;
45040+
45041+ match = name_set.n_hash[index];
45042+
45043+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45044+ match = match->next;
45045+
45046+ return match;
45047+}
45048+
45049+static struct name_entry *
45050+lookup_name_entry_create(const char *name)
45051+{
45052+ unsigned int len = strlen(name);
45053+ unsigned int key = full_name_hash(name, len);
45054+ unsigned int index = key % name_set.n_size;
45055+ struct name_entry *match;
45056+
45057+ match = name_set.n_hash[index];
45058+
45059+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45060+ !match->deleted))
45061+ match = match->next;
45062+
45063+ if (match && match->deleted)
45064+ return match;
45065+
45066+ match = name_set.n_hash[index];
45067+
45068+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45069+ match->deleted))
45070+ match = match->next;
45071+
45072+ if (match && !match->deleted)
45073+ return match;
45074+ else
45075+ return NULL;
45076+}
45077+
45078+static struct inodev_entry *
45079+lookup_inodev_entry(const ino_t ino, const dev_t dev)
45080+{
45081+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
45082+ struct inodev_entry *match;
45083+
45084+ match = inodev_set.i_hash[index];
45085+
45086+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
45087+ match = match->next;
45088+
45089+ return match;
45090+}
45091+
45092+static void
45093+insert_inodev_entry(struct inodev_entry *entry)
45094+{
45095+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
45096+ inodev_set.i_size);
45097+ struct inodev_entry **curr;
45098+
45099+ entry->prev = NULL;
45100+
45101+ curr = &inodev_set.i_hash[index];
45102+ if (*curr != NULL)
45103+ (*curr)->prev = entry;
45104+
45105+ entry->next = *curr;
45106+ *curr = entry;
45107+
45108+ return;
45109+}
45110+
45111+static void
45112+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
45113+{
45114+ unsigned int index =
45115+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
45116+ struct acl_role_label **curr;
45117+ struct acl_role_label *tmp;
45118+
45119+ curr = &acl_role_set.r_hash[index];
45120+
45121+ /* if role was already inserted due to domains and already has
45122+ a role in the same bucket as it attached, then we need to
45123+ combine these two buckets
45124+ */
45125+ if (role->next) {
45126+ tmp = role->next;
45127+ while (tmp->next)
45128+ tmp = tmp->next;
45129+ tmp->next = *curr;
45130+ } else
45131+ role->next = *curr;
45132+ *curr = role;
45133+
45134+ return;
45135+}
45136+
45137+static void
45138+insert_acl_role_label(struct acl_role_label *role)
45139+{
45140+ int i;
45141+
45142+ if (role_list == NULL) {
45143+ role_list = role;
45144+ role->prev = NULL;
45145+ } else {
45146+ role->prev = role_list;
45147+ role_list = role;
45148+ }
45149+
45150+ /* used for hash chains */
45151+ role->next = NULL;
45152+
45153+ if (role->roletype & GR_ROLE_DOMAIN) {
45154+ for (i = 0; i < role->domain_child_num; i++)
45155+ __insert_acl_role_label(role, role->domain_children[i]);
45156+ } else
45157+ __insert_acl_role_label(role, role->uidgid);
45158+}
45159+
45160+static int
45161+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
45162+{
45163+ struct name_entry **curr, *nentry;
45164+ struct inodev_entry *ientry;
45165+ unsigned int len = strlen(name);
45166+ unsigned int key = full_name_hash(name, len);
45167+ unsigned int index = key % name_set.n_size;
45168+
45169+ curr = &name_set.n_hash[index];
45170+
45171+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
45172+ curr = &((*curr)->next);
45173+
45174+ if (*curr != NULL)
45175+ return 1;
45176+
45177+ nentry = acl_alloc(sizeof (struct name_entry));
45178+ if (nentry == NULL)
45179+ return 0;
45180+ ientry = acl_alloc(sizeof (struct inodev_entry));
45181+ if (ientry == NULL)
45182+ return 0;
45183+ ientry->nentry = nentry;
45184+
45185+ nentry->key = key;
45186+ nentry->name = name;
45187+ nentry->inode = inode;
45188+ nentry->device = device;
45189+ nentry->len = len;
45190+ nentry->deleted = deleted;
45191+
45192+ nentry->prev = NULL;
45193+ curr = &name_set.n_hash[index];
45194+ if (*curr != NULL)
45195+ (*curr)->prev = nentry;
45196+ nentry->next = *curr;
45197+ *curr = nentry;
45198+
45199+ /* insert us into the table searchable by inode/dev */
45200+ insert_inodev_entry(ientry);
45201+
45202+ return 1;
45203+}
45204+
45205+static void
45206+insert_acl_obj_label(struct acl_object_label *obj,
45207+ struct acl_subject_label *subj)
45208+{
45209+ unsigned int index =
45210+ fhash(obj->inode, obj->device, subj->obj_hash_size);
45211+ struct acl_object_label **curr;
45212+
45213+
45214+ obj->prev = NULL;
45215+
45216+ curr = &subj->obj_hash[index];
45217+ if (*curr != NULL)
45218+ (*curr)->prev = obj;
45219+
45220+ obj->next = *curr;
45221+ *curr = obj;
45222+
45223+ return;
45224+}
45225+
45226+static void
45227+insert_acl_subj_label(struct acl_subject_label *obj,
45228+ struct acl_role_label *role)
45229+{
45230+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45231+ struct acl_subject_label **curr;
45232+
45233+ obj->prev = NULL;
45234+
45235+ curr = &role->subj_hash[index];
45236+ if (*curr != NULL)
45237+ (*curr)->prev = obj;
45238+
45239+ obj->next = *curr;
45240+ *curr = obj;
45241+
45242+ return;
45243+}
45244+
45245+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45246+
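+/* table_sizes[] below is a list of primes, one just under each power of two;
+   create_table() picks the smallest prime strictly larger than the requested
+   element count, allocates with kmalloc() when the table fits in a page and
+   vmalloc() otherwise, and writes the chosen size back through *len */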
45247+static void *
45248+create_table(__u32 * len, int elementsize)
45249+{
45250+ unsigned int table_sizes[] = {
45251+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45252+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45253+ 4194301, 8388593, 16777213, 33554393, 67108859
45254+ };
45255+ void *newtable = NULL;
45256+ unsigned int pwr = 0;
45257+
45258+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45259+ table_sizes[pwr] <= *len)
45260+ pwr++;
45261+
45262+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45263+ return newtable;
45264+
45265+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45266+ newtable =
45267+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45268+ else
45269+ newtable = vmalloc(table_sizes[pwr] * elementsize);
45270+
45271+ *len = table_sizes[pwr];
45272+
45273+ return newtable;
45274+}
45275+
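+/* sizes every hash table from the counts supplied in the userland gr_arg, grabs a
+   reference to the real root dentry/vfsmount, and sets up the fake fs objects later
+   used for pipes, sockets and anonymous shared memory; returns nonzero on failure */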
45276+static int
45277+init_variables(const struct gr_arg *arg)
45278+{
45279+ struct task_struct *reaper = &init_task;
45280+ unsigned int stacksize;
45281+
45282+ subj_map_set.s_size = arg->role_db.num_subjects;
45283+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45284+ name_set.n_size = arg->role_db.num_objects;
45285+ inodev_set.i_size = arg->role_db.num_objects;
45286+
45287+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
45288+ !name_set.n_size || !inodev_set.i_size)
45289+ return 1;
45290+
45291+ if (!gr_init_uidset())
45292+ return 1;
45293+
45294+ /* set up the stack that holds allocation info */
45295+
45296+ stacksize = arg->role_db.num_pointers + 5;
45297+
45298+ if (!acl_alloc_stack_init(stacksize))
45299+ return 1;
45300+
45301+ /* grab reference for the real root dentry and vfsmount */
45302+ read_lock(&reaper->fs->lock);
45303+ real_root = dget(reaper->fs->root.dentry);
45304+ real_root_mnt = mntget(reaper->fs->root.mnt);
45305+ read_unlock(&reaper->fs->lock);
45306+
45307+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45308+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
45309+#endif
45310+
45311+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45312+ if (fakefs_obj_rw == NULL)
45313+ return 1;
45314+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45315+
45316+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45317+ if (fakefs_obj_rwx == NULL)
45318+ return 1;
45319+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45320+
45321+ subj_map_set.s_hash =
45322+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45323+ acl_role_set.r_hash =
45324+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45325+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45326+ inodev_set.i_hash =
45327+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45328+
45329+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45330+ !name_set.n_hash || !inodev_set.i_hash)
45331+ return 1;
45332+
45333+ memset(subj_map_set.s_hash, 0,
45334+ sizeof(struct subject_map *) * subj_map_set.s_size);
45335+ memset(acl_role_set.r_hash, 0,
45336+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
45337+ memset(name_set.n_hash, 0,
45338+ sizeof (struct name_entry *) * name_set.n_size);
45339+ memset(inodev_set.i_hash, 0,
45340+ sizeof (struct inodev_entry *) * inodev_set.i_size);
45341+
45342+ return 0;
45343+}
45344+
45345+/* free information that is not needed after startup;
45346+   currently this is the user->kernel pointer mappings for subjects
45347+*/
45348+
45349+static void
45350+free_init_variables(void)
45351+{
45352+ __u32 i;
45353+
45354+ if (subj_map_set.s_hash) {
45355+ for (i = 0; i < subj_map_set.s_size; i++) {
45356+ if (subj_map_set.s_hash[i]) {
45357+ kfree(subj_map_set.s_hash[i]);
45358+ subj_map_set.s_hash[i] = NULL;
45359+ }
45360+ }
45361+
45362+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45363+ PAGE_SIZE)
45364+ kfree(subj_map_set.s_hash);
45365+ else
45366+ vfree(subj_map_set.s_hash);
45367+ }
45368+
45369+ return;
45370+}
45371+
45372+static void
45373+free_variables(void)
45374+{
45375+ struct acl_subject_label *s;
45376+ struct acl_role_label *r;
45377+ struct task_struct *task, *task2;
45378+ unsigned int x;
45379+
45380+ gr_clear_learn_entries();
45381+
45382+ read_lock(&tasklist_lock);
45383+ do_each_thread(task2, task) {
45384+ task->acl_sp_role = 0;
45385+ task->acl_role_id = 0;
45386+ task->acl = NULL;
45387+ task->role = NULL;
45388+ } while_each_thread(task2, task);
45389+ read_unlock(&tasklist_lock);
45390+
45391+ /* release the reference to the real root dentry and vfsmount */
45392+ if (real_root)
45393+ dput(real_root);
45394+ real_root = NULL;
45395+ if (real_root_mnt)
45396+ mntput(real_root_mnt);
45397+ real_root_mnt = NULL;
45398+
45399+ /* free all object hash tables */
45400+
45401+ FOR_EACH_ROLE_START(r)
45402+ if (r->subj_hash == NULL)
45403+ goto next_role;
45404+ FOR_EACH_SUBJECT_START(r, s, x)
45405+ if (s->obj_hash == NULL)
45406+ break;
45407+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45408+ kfree(s->obj_hash);
45409+ else
45410+ vfree(s->obj_hash);
45411+ FOR_EACH_SUBJECT_END(s, x)
45412+ FOR_EACH_NESTED_SUBJECT_START(r, s)
45413+ if (s->obj_hash == NULL)
45414+ break;
45415+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45416+ kfree(s->obj_hash);
45417+ else
45418+ vfree(s->obj_hash);
45419+ FOR_EACH_NESTED_SUBJECT_END(s)
45420+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45421+ kfree(r->subj_hash);
45422+ else
45423+ vfree(r->subj_hash);
45424+ r->subj_hash = NULL;
45425+next_role:
45426+ FOR_EACH_ROLE_END(r)
45427+
45428+ acl_free_all();
45429+
45430+ if (acl_role_set.r_hash) {
45431+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45432+ PAGE_SIZE)
45433+ kfree(acl_role_set.r_hash);
45434+ else
45435+ vfree(acl_role_set.r_hash);
45436+ }
45437+ if (name_set.n_hash) {
45438+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
45439+ PAGE_SIZE)
45440+ kfree(name_set.n_hash);
45441+ else
45442+ vfree(name_set.n_hash);
45443+ }
45444+
45445+ if (inodev_set.i_hash) {
45446+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45447+ PAGE_SIZE)
45448+ kfree(inodev_set.i_hash);
45449+ else
45450+ vfree(inodev_set.i_hash);
45451+ }
45452+
45453+ gr_free_uidset();
45454+
45455+ memset(&name_set, 0, sizeof (struct name_db));
45456+ memset(&inodev_set, 0, sizeof (struct inodev_db));
45457+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45458+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45459+
45460+ default_role = NULL;
45461+ role_list = NULL;
45462+
45463+ return;
45464+}
45465+
45466+static __u32
45467+count_user_objs(struct acl_object_label *userp)
45468+{
45469+ struct acl_object_label o_tmp;
45470+ __u32 num = 0;
45471+
45472+ while (userp) {
45473+ if (copy_from_user(&o_tmp, userp,
45474+ sizeof (struct acl_object_label)))
45475+ break;
45476+
45477+ userp = o_tmp.prev;
45478+ num++;
45479+ }
45480+
45481+ return num;
45482+}
45483+
45484+static struct acl_subject_label *
45485+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45486+
45487+static int
45488+copy_user_glob(struct acl_object_label *obj)
45489+{
45490+ struct acl_object_label *g_tmp, **guser;
45491+ unsigned int len;
45492+ char *tmp;
45493+
45494+ if (obj->globbed == NULL)
45495+ return 0;
45496+
45497+ guser = &obj->globbed;
45498+ while (*guser) {
45499+ g_tmp = (struct acl_object_label *)
45500+ acl_alloc(sizeof (struct acl_object_label));
45501+ if (g_tmp == NULL)
45502+ return -ENOMEM;
45503+
45504+ if (copy_from_user(g_tmp, *guser,
45505+ sizeof (struct acl_object_label)))
45506+ return -EFAULT;
45507+
45508+ len = strnlen_user(g_tmp->filename, PATH_MAX);
45509+
45510+ if (!len || len >= PATH_MAX)
45511+ return -EINVAL;
45512+
45513+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45514+ return -ENOMEM;
45515+
45516+ if (copy_from_user(tmp, g_tmp->filename, len))
45517+ return -EFAULT;
45518+ tmp[len-1] = '\0';
45519+ g_tmp->filename = tmp;
45520+
45521+ *guser = g_tmp;
45522+ guser = &(g_tmp->next);
45523+ }
45524+
45525+ return 0;
45526+}
45527+
45528+static int
45529+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45530+ struct acl_role_label *role)
45531+{
45532+ struct acl_object_label *o_tmp;
45533+ unsigned int len;
45534+ int ret;
45535+ char *tmp;
45536+
45537+ while (userp) {
45538+ if ((o_tmp = (struct acl_object_label *)
45539+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
45540+ return -ENOMEM;
45541+
45542+ if (copy_from_user(o_tmp, userp,
45543+ sizeof (struct acl_object_label)))
45544+ return -EFAULT;
45545+
45546+ userp = o_tmp->prev;
45547+
45548+ len = strnlen_user(o_tmp->filename, PATH_MAX);
45549+
45550+ if (!len || len >= PATH_MAX)
45551+ return -EINVAL;
45552+
45553+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45554+ return -ENOMEM;
45555+
45556+ if (copy_from_user(tmp, o_tmp->filename, len))
45557+ return -EFAULT;
45558+ tmp[len-1] = '\0';
45559+ o_tmp->filename = tmp;
45560+
45561+ insert_acl_obj_label(o_tmp, subj);
45562+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45563+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45564+ return -ENOMEM;
45565+
45566+ ret = copy_user_glob(o_tmp);
45567+ if (ret)
45568+ return ret;
45569+
45570+ if (o_tmp->nested) {
45571+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45572+ if (IS_ERR(o_tmp->nested))
45573+ return PTR_ERR(o_tmp->nested);
45574+
45575+ /* insert into nested subject list */
45576+ o_tmp->nested->next = role->hash->first;
45577+ role->hash->first = o_tmp->nested;
45578+ }
45579+ }
45580+
45581+ return 0;
45582+}
45583+
45584+static __u32
45585+count_user_subjs(struct acl_subject_label *userp)
45586+{
45587+ struct acl_subject_label s_tmp;
45588+ __u32 num = 0;
45589+
45590+ while (userp) {
45591+ if (copy_from_user(&s_tmp, userp,
45592+ sizeof (struct acl_subject_label)))
45593+ break;
45594+
45595+ userp = s_tmp.prev;
45596+ /* do not count nested subjects against this count, since
45597+ they are not included in the hash table, but are
45598+ attached to objects. We have already counted
45599+ the subjects in userspace for the allocation
45600+ stack
45601+ */
45602+ if (!(s_tmp.mode & GR_NESTED))
45603+ num++;
45604+ }
45605+
45606+ return num;
45607+}
45608+
45609+static int
45610+copy_user_allowedips(struct acl_role_label *rolep)
45611+{
45612+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45613+
45614+ ruserip = rolep->allowed_ips;
45615+
45616+ while (ruserip) {
45617+ rlast = rtmp;
45618+
45619+ if ((rtmp = (struct role_allowed_ip *)
45620+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45621+ return -ENOMEM;
45622+
45623+ if (copy_from_user(rtmp, ruserip,
45624+ sizeof (struct role_allowed_ip)))
45625+ return -EFAULT;
45626+
45627+ ruserip = rtmp->prev;
45628+
45629+ if (!rlast) {
45630+ rtmp->prev = NULL;
45631+ rolep->allowed_ips = rtmp;
45632+ } else {
45633+ rlast->next = rtmp;
45634+ rtmp->prev = rlast;
45635+ }
45636+
45637+ if (!ruserip)
45638+ rtmp->next = NULL;
45639+ }
45640+
45641+ return 0;
45642+}
45643+
45644+static int
45645+copy_user_transitions(struct acl_role_label *rolep)
45646+{
45647+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
45648+
45649+ unsigned int len;
45650+ char *tmp;
45651+
45652+ rusertp = rolep->transitions;
45653+
45654+ while (rusertp) {
45655+ rlast = rtmp;
45656+
45657+ if ((rtmp = (struct role_transition *)
45658+ acl_alloc(sizeof (struct role_transition))) == NULL)
45659+ return -ENOMEM;
45660+
45661+ if (copy_from_user(rtmp, rusertp,
45662+ sizeof (struct role_transition)))
45663+ return -EFAULT;
45664+
45665+ rusertp = rtmp->prev;
45666+
45667+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45668+
45669+ if (!len || len >= GR_SPROLE_LEN)
45670+ return -EINVAL;
45671+
45672+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45673+ return -ENOMEM;
45674+
45675+ if (copy_from_user(tmp, rtmp->rolename, len))
45676+ return -EFAULT;
45677+ tmp[len-1] = '\0';
45678+ rtmp->rolename = tmp;
45679+
45680+ if (!rlast) {
45681+ rtmp->prev = NULL;
45682+ rolep->transitions = rtmp;
45683+ } else {
45684+ rlast->next = rtmp;
45685+ rtmp->prev = rlast;
45686+ }
45687+
45688+ if (!rusertp)
45689+ rtmp->next = NULL;
45690+ }
45691+
45692+ return 0;
45693+}
45694+
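+/* copies one subject label and everything hanging off it (filename, user/group
+   transition tables, object hash, parent subject chain, IP labels) from userspace
+   into kernel memory; returns an ERR_PTR() on failure and records the user->kernel
+   mapping in the subject map so repeated references are copied only once */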
45695+static struct acl_subject_label *
45696+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45697+{
45698+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45699+ unsigned int len;
45700+ char *tmp;
45701+ __u32 num_objs;
45702+ struct acl_ip_label **i_tmp, *i_utmp2;
45703+ struct gr_hash_struct ghash;
45704+ struct subject_map *subjmap;
45705+ unsigned int i_num;
45706+ int err;
45707+
45708+ s_tmp = lookup_subject_map(userp);
45709+
45710+ /* we've already copied this subject into the kernel, just return
45711+ the reference to it, and don't copy it over again
45712+ */
45713+ if (s_tmp)
45714+ return(s_tmp);
45715+
45716+ if ((s_tmp = (struct acl_subject_label *)
45717+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45718+ return ERR_PTR(-ENOMEM);
45719+
45720+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45721+ if (subjmap == NULL)
45722+ return ERR_PTR(-ENOMEM);
45723+
45724+ subjmap->user = userp;
45725+ subjmap->kernel = s_tmp;
45726+ insert_subj_map_entry(subjmap);
45727+
45728+ if (copy_from_user(s_tmp, userp,
45729+ sizeof (struct acl_subject_label)))
45730+ return ERR_PTR(-EFAULT);
45731+
45732+ len = strnlen_user(s_tmp->filename, PATH_MAX);
45733+
45734+ if (!len || len >= PATH_MAX)
45735+ return ERR_PTR(-EINVAL);
45736+
45737+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45738+ return ERR_PTR(-ENOMEM);
45739+
45740+ if (copy_from_user(tmp, s_tmp->filename, len))
45741+ return ERR_PTR(-EFAULT);
45742+ tmp[len-1] = '\0';
45743+ s_tmp->filename = tmp;
45744+
45745+ if (!strcmp(s_tmp->filename, "/"))
45746+ role->root_label = s_tmp;
45747+
45748+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45749+ return ERR_PTR(-EFAULT);
45750+
45751+ /* copy user and group transition tables */
45752+
45753+ if (s_tmp->user_trans_num) {
45754+ uid_t *uidlist;
45755+
45756+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45757+ if (uidlist == NULL)
45758+ return ERR_PTR(-ENOMEM);
45759+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45760+ return ERR_PTR(-EFAULT);
45761+
45762+ s_tmp->user_transitions = uidlist;
45763+ }
45764+
45765+ if (s_tmp->group_trans_num) {
45766+ gid_t *gidlist;
45767+
45768+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45769+ if (gidlist == NULL)
45770+ return ERR_PTR(-ENOMEM);
45771+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45772+ return ERR_PTR(-EFAULT);
45773+
45774+ s_tmp->group_transitions = gidlist;
45775+ }
45776+
45777+ /* set up object hash table */
45778+ num_objs = count_user_objs(ghash.first);
45779+
45780+ s_tmp->obj_hash_size = num_objs;
45781+ s_tmp->obj_hash =
45782+ (struct acl_object_label **)
45783+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45784+
45785+ if (!s_tmp->obj_hash)
45786+ return ERR_PTR(-ENOMEM);
45787+
45788+ memset(s_tmp->obj_hash, 0,
45789+ s_tmp->obj_hash_size *
45790+ sizeof (struct acl_object_label *));
45791+
45792+ /* add in objects */
45793+ err = copy_user_objs(ghash.first, s_tmp, role);
45794+
45795+ if (err)
45796+ return ERR_PTR(err);
45797+
45798+ /* set pointer for parent subject */
45799+ if (s_tmp->parent_subject) {
45800+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45801+
45802+ if (IS_ERR(s_tmp2))
45803+ return s_tmp2;
45804+
45805+ s_tmp->parent_subject = s_tmp2;
45806+ }
45807+
45808+ /* add in ip acls */
45809+
45810+ if (!s_tmp->ip_num) {
45811+ s_tmp->ips = NULL;
45812+ goto insert;
45813+ }
45814+
45815+ i_tmp =
45816+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45817+ sizeof (struct acl_ip_label *));
45818+
45819+ if (!i_tmp)
45820+ return ERR_PTR(-ENOMEM);
45821+
45822+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45823+ *(i_tmp + i_num) =
45824+ (struct acl_ip_label *)
45825+ acl_alloc(sizeof (struct acl_ip_label));
45826+ if (!*(i_tmp + i_num))
45827+ return ERR_PTR(-ENOMEM);
45828+
45829+ if (copy_from_user
45830+ (&i_utmp2, s_tmp->ips + i_num,
45831+ sizeof (struct acl_ip_label *)))
45832+ return ERR_PTR(-EFAULT);
45833+
45834+ if (copy_from_user
45835+ (*(i_tmp + i_num), i_utmp2,
45836+ sizeof (struct acl_ip_label)))
45837+ return ERR_PTR(-EFAULT);
45838+
45839+ if ((*(i_tmp + i_num))->iface == NULL)
45840+ continue;
45841+
45842+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45843+ if (!len || len >= IFNAMSIZ)
45844+ return ERR_PTR(-EINVAL);
45845+ tmp = acl_alloc(len);
45846+ if (tmp == NULL)
45847+ return ERR_PTR(-ENOMEM);
45848+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45849+ return ERR_PTR(-EFAULT);
45850+ (*(i_tmp + i_num))->iface = tmp;
45851+ }
45852+
45853+ s_tmp->ips = i_tmp;
45854+
45855+insert:
45856+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45857+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45858+ return ERR_PTR(-ENOMEM);
45859+
45860+ return s_tmp;
45861+}
45862+
45863+static int
45864+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45865+{
45866+ struct acl_subject_label s_pre;
45867+ struct acl_subject_label * ret;
45868+ int err;
45869+
45870+ while (userp) {
45871+ if (copy_from_user(&s_pre, userp,
45872+ sizeof (struct acl_subject_label)))
45873+ return -EFAULT;
45874+
45875+ /* do not add nested subjects here, add
45876+ while parsing objects
45877+ */
45878+
45879+ if (s_pre.mode & GR_NESTED) {
45880+ userp = s_pre.prev;
45881+ continue;
45882+ }
45883+
45884+ ret = do_copy_user_subj(userp, role);
45885+
45886+ err = PTR_ERR(ret);
45887+ if (IS_ERR(ret))
45888+ return err;
45889+
45890+ insert_acl_subj_label(ret, role);
45891+
45892+ userp = s_pre.prev;
45893+ }
45894+
45895+ return 0;
45896+}
45897+
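+/* top-level copy of the RBAC policy from the userspace gr_arg: special role
+   authentication info first, then every role along with its allowed IPs, domain
+   children, role transitions and subject/object hash tables; on any failure the
+   partially built tables are torn down via free_variables() */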
45898+static int
45899+copy_user_acl(struct gr_arg *arg)
45900+{
45901+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45902+ struct sprole_pw *sptmp;
45903+ struct gr_hash_struct *ghash;
45904+ uid_t *domainlist;
45905+ unsigned int r_num;
45906+ unsigned int len;
45907+ char *tmp;
45908+ int err = 0;
45909+ __u16 i;
45910+ __u32 num_subjs;
45911+
45912+ /* we need a default and kernel role */
45913+ if (arg->role_db.num_roles < 2)
45914+ return -EINVAL;
45915+
45916+ /* copy special role authentication info from userspace */
45917+
45918+ num_sprole_pws = arg->num_sprole_pws;
45919+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45920+
45921+ if (!acl_special_roles) {
45922+ err = -ENOMEM;
45923+ goto cleanup;
45924+ }
45925+
45926+ for (i = 0; i < num_sprole_pws; i++) {
45927+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45928+ if (!sptmp) {
45929+ err = -ENOMEM;
45930+ goto cleanup;
45931+ }
45932+ if (copy_from_user(sptmp, arg->sprole_pws + i,
45933+ sizeof (struct sprole_pw))) {
45934+ err = -EFAULT;
45935+ goto cleanup;
45936+ }
45937+
45938+ len =
45939+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45940+
45941+ if (!len || len >= GR_SPROLE_LEN) {
45942+ err = -EINVAL;
45943+ goto cleanup;
45944+ }
45945+
45946+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45947+ err = -ENOMEM;
45948+ goto cleanup;
45949+ }
45950+
45951+ if (copy_from_user(tmp, sptmp->rolename, len)) {
45952+ err = -EFAULT;
45953+ goto cleanup;
45954+ }
45955+ tmp[len-1] = '\0';
45956+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45957+ printk(KERN_ALERT "Copying special role %s\n", tmp);
45958+#endif
45959+ sptmp->rolename = tmp;
45960+ acl_special_roles[i] = sptmp;
45961+ }
45962+
45963+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45964+
45965+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45966+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
45967+
45968+ if (!r_tmp) {
45969+ err = -ENOMEM;
45970+ goto cleanup;
45971+ }
45972+
45973+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
45974+ sizeof (struct acl_role_label *))) {
45975+ err = -EFAULT;
45976+ goto cleanup;
45977+ }
45978+
45979+ if (copy_from_user(r_tmp, r_utmp2,
45980+ sizeof (struct acl_role_label))) {
45981+ err = -EFAULT;
45982+ goto cleanup;
45983+ }
45984+
45985+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45986+
45987+		if (!len || len >= GR_SPROLE_LEN) {
45988+ err = -EINVAL;
45989+ goto cleanup;
45990+ }
45991+
45992+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45993+ err = -ENOMEM;
45994+ goto cleanup;
45995+ }
45996+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
45997+ err = -EFAULT;
45998+ goto cleanup;
45999+ }
46000+ tmp[len-1] = '\0';
46001+ r_tmp->rolename = tmp;
46002+
46003+ if (!strcmp(r_tmp->rolename, "default")
46004+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46005+ default_role = r_tmp;
46006+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46007+ kernel_role = r_tmp;
46008+ }
46009+
46010+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46011+ err = -ENOMEM;
46012+ goto cleanup;
46013+ }
46014+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46015+ err = -EFAULT;
46016+ goto cleanup;
46017+ }
46018+
46019+ r_tmp->hash = ghash;
46020+
46021+ num_subjs = count_user_subjs(r_tmp->hash->first);
46022+
46023+ r_tmp->subj_hash_size = num_subjs;
46024+ r_tmp->subj_hash =
46025+ (struct acl_subject_label **)
46026+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46027+
46028+ if (!r_tmp->subj_hash) {
46029+ err = -ENOMEM;
46030+ goto cleanup;
46031+ }
46032+
46033+ err = copy_user_allowedips(r_tmp);
46034+ if (err)
46035+ goto cleanup;
46036+
46037+ /* copy domain info */
46038+ if (r_tmp->domain_children != NULL) {
46039+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46040+ if (domainlist == NULL) {
46041+ err = -ENOMEM;
46042+ goto cleanup;
46043+ }
46044+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46045+ err = -EFAULT;
46046+ goto cleanup;
46047+ }
46048+ r_tmp->domain_children = domainlist;
46049+ }
46050+
46051+ err = copy_user_transitions(r_tmp);
46052+ if (err)
46053+ goto cleanup;
46054+
46055+ memset(r_tmp->subj_hash, 0,
46056+ r_tmp->subj_hash_size *
46057+ sizeof (struct acl_subject_label *));
46058+
46059+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46060+
46061+ if (err)
46062+ goto cleanup;
46063+
46064+ /* set nested subject list to null */
46065+ r_tmp->hash->first = NULL;
46066+
46067+ insert_acl_role_label(r_tmp);
46068+ }
46069+
46070+ goto return_err;
46071+ cleanup:
46072+ free_variables();
46073+ return_err:
46074+ return err;
46075+
46076+}
46077+
46078+static int
46079+gracl_init(struct gr_arg *args)
46080+{
46081+ int error = 0;
46082+
46083+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
46084+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
46085+
46086+ if (init_variables(args)) {
46087+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
46088+ error = -ENOMEM;
46089+ free_variables();
46090+ goto out;
46091+ }
46092+
46093+ error = copy_user_acl(args);
46094+ free_init_variables();
46095+ if (error) {
46096+ free_variables();
46097+ goto out;
46098+ }
46099+
46100+ if ((error = gr_set_acls(0))) {
46101+ free_variables();
46102+ goto out;
46103+ }
46104+
46105+ pax_open_kernel();
46106+ gr_status |= GR_READY;
46107+ pax_close_kernel();
46108+
46109+ out:
46110+ return error;
46111+}
46112+
46113+/* derived from glibc fnmatch(); 0: match, 1: no match */
46114+
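+/* handles '?', '*' and character classes ('[...]', with a leading '!' or '^' for
+   negation and '-' ranges); '?' never matches '/', and a non-trailing '*' will not
+   cross a '/' */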
46115+static int
46116+glob_match(const char *p, const char *n)
46117+{
46118+ char c;
46119+
46120+ while ((c = *p++) != '\0') {
46121+ switch (c) {
46122+ case '?':
46123+ if (*n == '\0')
46124+ return 1;
46125+ else if (*n == '/')
46126+ return 1;
46127+ break;
46128+ case '\\':
46129+ if (*n != c)
46130+ return 1;
46131+ break;
46132+ case '*':
46133+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
46134+ if (*n == '/')
46135+ return 1;
46136+ else if (c == '?') {
46137+ if (*n == '\0')
46138+ return 1;
46139+ else
46140+ ++n;
46141+ }
46142+ }
46143+ if (c == '\0') {
46144+ return 0;
46145+ } else {
46146+ const char *endp;
46147+
46148+ if ((endp = strchr(n, '/')) == NULL)
46149+ endp = n + strlen(n);
46150+
46151+ if (c == '[') {
46152+ for (--p; n < endp; ++n)
46153+ if (!glob_match(p, n))
46154+ return 0;
46155+ } else if (c == '/') {
46156+ while (*n != '\0' && *n != '/')
46157+ ++n;
46158+ if (*n == '/' && !glob_match(p, n + 1))
46159+ return 0;
46160+ } else {
46161+ for (--p; n < endp; ++n)
46162+ if (*n == c && !glob_match(p, n))
46163+ return 0;
46164+ }
46165+
46166+ return 1;
46167+ }
46168+ case '[':
46169+ {
46170+ int not;
46171+ char cold;
46172+
46173+ if (*n == '\0' || *n == '/')
46174+ return 1;
46175+
46176+ not = (*p == '!' || *p == '^');
46177+ if (not)
46178+ ++p;
46179+
46180+ c = *p++;
46181+ for (;;) {
46182+ unsigned char fn = (unsigned char)*n;
46183+
46184+ if (c == '\0')
46185+ return 1;
46186+ else {
46187+ if (c == fn)
46188+ goto matched;
46189+ cold = c;
46190+ c = *p++;
46191+
46192+ if (c == '-' && *p != ']') {
46193+ unsigned char cend = *p++;
46194+
46195+ if (cend == '\0')
46196+ return 1;
46197+
46198+ if (cold <= fn && fn <= cend)
46199+ goto matched;
46200+
46201+ c = *p++;
46202+ }
46203+ }
46204+
46205+ if (c == ']')
46206+ break;
46207+ }
46208+ if (!not)
46209+ return 1;
46210+ break;
46211+ matched:
46212+ while (c != ']') {
46213+ if (c == '\0')
46214+ return 1;
46215+
46216+ c = *p++;
46217+ }
46218+ if (not)
46219+ return 1;
46220+ }
46221+ break;
46222+ default:
46223+ if (c != *n)
46224+ return 1;
46225+ }
46226+
46227+ ++n;
46228+ }
46229+
46230+ if (*n == '\0')
46231+ return 0;
46232+
46233+ if (*n == '/')
46234+ return 0;
46235+
46236+ return 1;
46237+}
46238+
46239+static struct acl_object_label *
46240+chk_glob_label(struct acl_object_label *globbed,
46241+ struct dentry *dentry, struct vfsmount *mnt, char **path)
46242+{
46243+ struct acl_object_label *tmp;
46244+
46245+ if (*path == NULL)
46246+ *path = gr_to_filename_nolock(dentry, mnt);
46247+
46248+ tmp = globbed;
46249+
46250+ while (tmp) {
46251+ if (!glob_match(tmp->filename, *path))
46252+ return tmp;
46253+ tmp = tmp->next;
46254+ }
46255+
46256+ return NULL;
46257+}
46258+
46259+static struct acl_object_label *
46260+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46261+ const ino_t curr_ino, const dev_t curr_dev,
46262+ const struct acl_subject_label *subj, char **path, const int checkglob)
46263+{
46264+ struct acl_subject_label *tmpsubj;
46265+ struct acl_object_label *retval;
46266+ struct acl_object_label *retval2;
46267+
46268+ tmpsubj = (struct acl_subject_label *) subj;
46269+ read_lock(&gr_inode_lock);
46270+ do {
46271+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46272+ if (retval) {
46273+ if (checkglob && retval->globbed) {
46274+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46275+ (struct vfsmount *)orig_mnt, path);
46276+ if (retval2)
46277+ retval = retval2;
46278+ }
46279+ break;
46280+ }
46281+ } while ((tmpsubj = tmpsubj->parent_subject));
46282+ read_unlock(&gr_inode_lock);
46283+
46284+ return retval;
46285+}
46286+
46287+static __inline__ struct acl_object_label *
46288+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46289+ const struct dentry *curr_dentry,
46290+ const struct acl_subject_label *subj, char **path, const int checkglob)
46291+{
46292+ int newglob = checkglob;
46293+
46294+	/* if we aren't yet checking a subdirectory of the original path, skip the glob check,
46295+	   since we don't want a / * rule to match instead of the / object itself.
46296+	   create lookups that call this function are the exception: they look up on the
46297+	   parent and therefore need globbing checks on all paths
46298+	*/
46299+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46300+ newglob = GR_NO_GLOB;
46301+
46302+ return __full_lookup(orig_dentry, orig_mnt,
46303+ curr_dentry->d_inode->i_ino,
46304+ __get_dev(curr_dentry), subj, path, newglob);
46305+}
46306+
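+/* resolves the object label governing a dentry/vfsmount pair: anonymous shared memory,
+   pipes, sockets and unlinked hugetlbfs files get one of the fake fs objects; otherwise
+   the dentry is walked upward (crossing mountpoints) toward the real root recorded at
+   policy load, the closest matching object wins, and the real root's object is the
+   final fallback */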
46307+static struct acl_object_label *
46308+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46309+ const struct acl_subject_label *subj, char *path, const int checkglob)
46310+{
46311+ struct dentry *dentry = (struct dentry *) l_dentry;
46312+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46313+ struct acl_object_label *retval;
46314+
46315+ spin_lock(&dcache_lock);
46316+ spin_lock(&vfsmount_lock);
46317+
46318+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46319+#ifdef CONFIG_NET
46320+ mnt == sock_mnt ||
46321+#endif
46322+#ifdef CONFIG_HUGETLBFS
46323+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46324+#endif
46325+ /* ignore Eric Biederman */
46326+ IS_PRIVATE(l_dentry->d_inode))) {
46327+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46328+ goto out;
46329+ }
46330+
46331+ for (;;) {
46332+ if (dentry == real_root && mnt == real_root_mnt)
46333+ break;
46334+
46335+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46336+ if (mnt->mnt_parent == mnt)
46337+ break;
46338+
46339+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46340+ if (retval != NULL)
46341+ goto out;
46342+
46343+ dentry = mnt->mnt_mountpoint;
46344+ mnt = mnt->mnt_parent;
46345+ continue;
46346+ }
46347+
46348+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46349+ if (retval != NULL)
46350+ goto out;
46351+
46352+ dentry = dentry->d_parent;
46353+ }
46354+
46355+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46356+
46357+ if (retval == NULL)
46358+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
46359+out:
46360+ spin_unlock(&vfsmount_lock);
46361+ spin_unlock(&dcache_lock);
46362+
46363+ BUG_ON(retval == NULL);
46364+
46365+ return retval;
46366+}
46367+
46368+static __inline__ struct acl_object_label *
46369+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46370+ const struct acl_subject_label *subj)
46371+{
46372+ char *path = NULL;
46373+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46374+}
46375+
46376+static __inline__ struct acl_object_label *
46377+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46378+ const struct acl_subject_label *subj)
46379+{
46380+ char *path = NULL;
46381+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46382+}
46383+
46384+static __inline__ struct acl_object_label *
46385+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46386+ const struct acl_subject_label *subj, char *path)
46387+{
46388+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46389+}
46390+
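+/* same walk as __chk_obj_label(), but through the role's subject hash: find the
+   subject label that applies to the given dentry, falling back to the subject for
+   the real root */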
46391+static struct acl_subject_label *
46392+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46393+ const struct acl_role_label *role)
46394+{
46395+ struct dentry *dentry = (struct dentry *) l_dentry;
46396+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46397+ struct acl_subject_label *retval;
46398+
46399+ spin_lock(&dcache_lock);
46400+ spin_lock(&vfsmount_lock);
46401+
46402+ for (;;) {
46403+ if (dentry == real_root && mnt == real_root_mnt)
46404+ break;
46405+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46406+ if (mnt->mnt_parent == mnt)
46407+ break;
46408+
46409+ read_lock(&gr_inode_lock);
46410+ retval =
46411+ lookup_acl_subj_label(dentry->d_inode->i_ino,
46412+ __get_dev(dentry), role);
46413+ read_unlock(&gr_inode_lock);
46414+ if (retval != NULL)
46415+ goto out;
46416+
46417+ dentry = mnt->mnt_mountpoint;
46418+ mnt = mnt->mnt_parent;
46419+ continue;
46420+ }
46421+
46422+ read_lock(&gr_inode_lock);
46423+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46424+ __get_dev(dentry), role);
46425+ read_unlock(&gr_inode_lock);
46426+ if (retval != NULL)
46427+ goto out;
46428+
46429+ dentry = dentry->d_parent;
46430+ }
46431+
46432+ read_lock(&gr_inode_lock);
46433+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46434+ __get_dev(dentry), role);
46435+ read_unlock(&gr_inode_lock);
46436+
46437+ if (unlikely(retval == NULL)) {
46438+ read_lock(&gr_inode_lock);
46439+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
46440+ __get_dev(real_root), role);
46441+ read_unlock(&gr_inode_lock);
46442+ }
46443+out:
46444+ spin_unlock(&vfsmount_lock);
46445+ spin_unlock(&dcache_lock);
46446+
46447+ BUG_ON(retval == NULL);
46448+
46449+ return retval;
46450+}
46451+
46452+static void
46453+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46454+{
46455+ struct task_struct *task = current;
46456+ const struct cred *cred = current_cred();
46457+
46458+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46459+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46460+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46461+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46462+
46463+ return;
46464+}
46465+
46466+static void
46467+gr_log_learn_sysctl(const char *path, const __u32 mode)
46468+{
46469+ struct task_struct *task = current;
46470+ const struct cred *cred = current_cred();
46471+
46472+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46473+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46474+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46475+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46476+
46477+ return;
46478+}
46479+
46480+static void
46481+gr_log_learn_id_change(const char type, const unsigned int real,
46482+ const unsigned int effective, const unsigned int fs)
46483+{
46484+ struct task_struct *task = current;
46485+ const struct cred *cred = current_cred();
46486+
46487+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46488+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46489+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46490+ type, real, effective, fs, &task->signal->saved_ip);
46491+
46492+ return;
46493+}
46494+
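+/* hard link check: requires that the object permissions at the new name and those on
+   the existing target agree (including setid handling), so a link cannot be used to
+   reach the target with more access than the subject already has on it; learning mode
+   logs the would-be denial and allows it */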
46495+__u32
46496+gr_check_link(const struct dentry * new_dentry,
46497+ const struct dentry * parent_dentry,
46498+ const struct vfsmount * parent_mnt,
46499+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46500+{
46501+ struct acl_object_label *obj;
46502+ __u32 oldmode, newmode;
46503+ __u32 needmode;
46504+
46505+ if (unlikely(!(gr_status & GR_READY)))
46506+ return (GR_CREATE | GR_LINK);
46507+
46508+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46509+ oldmode = obj->mode;
46510+
46511+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46512+ oldmode |= (GR_CREATE | GR_LINK);
46513+
46514+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46515+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46516+ needmode |= GR_SETID | GR_AUDIT_SETID;
46517+
46518+ newmode =
46519+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46520+ oldmode | needmode);
46521+
46522+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46523+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46524+ GR_INHERIT | GR_AUDIT_INHERIT);
46525+
46526+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46527+ goto bad;
46528+
46529+ if ((oldmode & needmode) != needmode)
46530+ goto bad;
46531+
46532+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46533+ if ((newmode & needmode) != needmode)
46534+ goto bad;
46535+
46536+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46537+ return newmode;
46538+bad:
46539+ needmode = oldmode;
46540+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46541+ needmode |= GR_SETID;
46542+
46543+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46544+ gr_log_learn(old_dentry, old_mnt, needmode);
46545+ return (GR_CREATE | GR_LINK);
46546+ } else if (newmode & GR_SUPPRESS)
46547+ return GR_SUPPRESS;
46548+ else
46549+ return 0;
46550+}
46551+
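+/* central access check for file operations: returns the subset of the requested mode
+   bits that the current subject's object label actually grants (when the RBAC system
+   isn't ready the request is allowed with audit bits stripped), with special handling
+   for the persistent-role init transfer and for learning mode */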
46552+__u32
46553+gr_search_file(const struct dentry * dentry, const __u32 mode,
46554+ const struct vfsmount * mnt)
46555+{
46556+ __u32 retval = mode;
46557+ struct acl_subject_label *curracl;
46558+ struct acl_object_label *currobj;
46559+
46560+ if (unlikely(!(gr_status & GR_READY)))
46561+ return (mode & ~GR_AUDITS);
46562+
46563+ curracl = current->acl;
46564+
46565+ currobj = chk_obj_label(dentry, mnt, curracl);
46566+ retval = currobj->mode & mode;
46567+
46568+ /* if we're opening a specified transfer file for writing
46569+ (e.g. /dev/initctl), then transfer our role to init
46570+ */
46571+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46572+ current->role->roletype & GR_ROLE_PERSIST)) {
46573+ struct task_struct *task = init_pid_ns.child_reaper;
46574+
46575+ if (task->role != current->role) {
46576+ task->acl_sp_role = 0;
46577+ task->acl_role_id = current->acl_role_id;
46578+ task->role = current->role;
46579+ rcu_read_lock();
46580+ read_lock(&grsec_exec_file_lock);
46581+ gr_apply_subject_to_task(task);
46582+ read_unlock(&grsec_exec_file_lock);
46583+ rcu_read_unlock();
46584+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46585+ }
46586+ }
46587+
46588+ if (unlikely
46589+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46590+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46591+ __u32 new_mode = mode;
46592+
46593+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46594+
46595+ retval = new_mode;
46596+
46597+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46598+ new_mode |= GR_INHERIT;
46599+
46600+ if (!(mode & GR_NOLEARN))
46601+ gr_log_learn(dentry, mnt, new_mode);
46602+ }
46603+
46604+ return retval;
46605+}
46606+
46607+__u32
46608+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46609+ const struct vfsmount * mnt, const __u32 mode)
46610+{
46611+ struct name_entry *match;
46612+ struct acl_object_label *matchpo;
46613+ struct acl_subject_label *curracl;
46614+ char *path;
46615+ __u32 retval;
46616+
46617+ if (unlikely(!(gr_status & GR_READY)))
46618+ return (mode & ~GR_AUDITS);
46619+
46620+ preempt_disable();
46621+ path = gr_to_filename_rbac(new_dentry, mnt);
46622+ match = lookup_name_entry_create(path);
46623+
46624+ if (!match)
46625+ goto check_parent;
46626+
46627+ curracl = current->acl;
46628+
46629+ read_lock(&gr_inode_lock);
46630+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46631+ read_unlock(&gr_inode_lock);
46632+
46633+ if (matchpo) {
46634+ if ((matchpo->mode & mode) !=
46635+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
46636+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46637+ __u32 new_mode = mode;
46638+
46639+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46640+
46641+ gr_log_learn(new_dentry, mnt, new_mode);
46642+
46643+ preempt_enable();
46644+ return new_mode;
46645+ }
46646+ preempt_enable();
46647+ return (matchpo->mode & mode);
46648+ }
46649+
46650+ check_parent:
46651+ curracl = current->acl;
46652+
46653+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46654+ retval = matchpo->mode & mode;
46655+
46656+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46657+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46658+ __u32 new_mode = mode;
46659+
46660+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46661+
46662+ gr_log_learn(new_dentry, mnt, new_mode);
46663+ preempt_enable();
46664+ return new_mode;
46665+ }
46666+
46667+ preempt_enable();
46668+ return retval;
46669+}
46670+
46671+int
46672+gr_check_hidden_task(const struct task_struct *task)
46673+{
46674+ if (unlikely(!(gr_status & GR_READY)))
46675+ return 0;
46676+
46677+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46678+ return 1;
46679+
46680+ return 0;
46681+}
46682+
46683+int
46684+gr_check_protected_task(const struct task_struct *task)
46685+{
46686+ if (unlikely(!(gr_status & GR_READY) || !task))
46687+ return 0;
46688+
46689+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46690+ task->acl != current->acl)
46691+ return 1;
46692+
46693+ return 0;
46694+}
46695+
46696+int
46697+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46698+{
46699+ struct task_struct *p;
46700+ int ret = 0;
46701+
46702+ if (unlikely(!(gr_status & GR_READY) || !pid))
46703+ return ret;
46704+
46705+ read_lock(&tasklist_lock);
46706+ do_each_pid_task(pid, type, p) {
46707+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46708+ p->acl != current->acl) {
46709+ ret = 1;
46710+ goto out;
46711+ }
46712+ } while_each_pid_task(pid, type, p);
46713+out:
46714+ read_unlock(&tasklist_lock);
46715+
46716+ return ret;
46717+}
46718+
46719+void
46720+gr_copy_label(struct task_struct *tsk)
46721+{
46722+ tsk->signal->used_accept = 0;
46723+ tsk->acl_sp_role = 0;
46724+ tsk->acl_role_id = current->acl_role_id;
46725+ tsk->acl = current->acl;
46726+ tsk->role = current->role;
46727+ tsk->signal->curr_ip = current->signal->curr_ip;
46728+ tsk->signal->saved_ip = current->signal->saved_ip;
46729+ if (current->exec_file)
46730+ get_file(current->exec_file);
46731+ tsk->exec_file = current->exec_file;
46732+ tsk->is_writable = current->is_writable;
46733+ if (unlikely(current->signal->used_accept)) {
46734+ current->signal->curr_ip = 0;
46735+ current->signal->saved_ip = 0;
46736+ }
46737+
46738+ return;
46739+}
46740+
46741+static void
46742+gr_set_proc_res(struct task_struct *task)
46743+{
46744+ struct acl_subject_label *proc;
46745+ unsigned short i;
46746+
46747+ proc = task->acl;
46748+
46749+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46750+ return;
46751+
46752+ for (i = 0; i < RLIM_NLIMITS; i++) {
46753+ if (!(proc->resmask & (1 << i)))
46754+ continue;
46755+
46756+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46757+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46758+ }
46759+
46760+ return;
46761+}
46762+
46763+extern int __gr_process_user_ban(struct user_struct *user);
46764+
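+/* enforces the subject's uid transition list on uid changes: with GR_ID_ALLOW only the
+   listed uids may be switched to, with GR_ID_DENY any listed uid is refused; a banned
+   user (CONFIG_GRKERNSEC_KERN_LOCKOUT / CONFIG_GRKERNSEC_BRUTE) is always refused.
+   Returns nonzero to deny the change */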
46765+int
46766+gr_check_user_change(int real, int effective, int fs)
46767+{
46768+ unsigned int i;
46769+ __u16 num;
46770+ uid_t *uidlist;
46771+ int curuid;
46772+ int realok = 0;
46773+ int effectiveok = 0;
46774+ int fsok = 0;
46775+
46776+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46777+ struct user_struct *user;
46778+
46779+ if (real == -1)
46780+ goto skipit;
46781+
46782+ user = find_user(real);
46783+ if (user == NULL)
46784+ goto skipit;
46785+
46786+ if (__gr_process_user_ban(user)) {
46787+ /* for find_user */
46788+ free_uid(user);
46789+ return 1;
46790+ }
46791+
46792+ /* for find_user */
46793+ free_uid(user);
46794+
46795+skipit:
46796+#endif
46797+
46798+ if (unlikely(!(gr_status & GR_READY)))
46799+ return 0;
46800+
46801+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46802+ gr_log_learn_id_change('u', real, effective, fs);
46803+
46804+ num = current->acl->user_trans_num;
46805+ uidlist = current->acl->user_transitions;
46806+
46807+ if (uidlist == NULL)
46808+ return 0;
46809+
46810+ if (real == -1)
46811+ realok = 1;
46812+ if (effective == -1)
46813+ effectiveok = 1;
46814+ if (fs == -1)
46815+ fsok = 1;
46816+
46817+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
46818+ for (i = 0; i < num; i++) {
46819+ curuid = (int)uidlist[i];
46820+ if (real == curuid)
46821+ realok = 1;
46822+ if (effective == curuid)
46823+ effectiveok = 1;
46824+ if (fs == curuid)
46825+ fsok = 1;
46826+ }
46827+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
46828+ for (i = 0; i < num; i++) {
46829+ curuid = (int)uidlist[i];
46830+ if (real == curuid)
46831+ break;
46832+ if (effective == curuid)
46833+ break;
46834+ if (fs == curuid)
46835+ break;
46836+ }
46837+ /* not in deny list */
46838+ if (i == num) {
46839+ realok = 1;
46840+ effectiveok = 1;
46841+ fsok = 1;
46842+ }
46843+ }
46844+
46845+ if (realok && effectiveok && fsok)
46846+ return 0;
46847+ else {
46848+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46849+ return 1;
46850+ }
46851+}
46852+
46853+int
46854+gr_check_group_change(int real, int effective, int fs)
46855+{
46856+ unsigned int i;
46857+ __u16 num;
46858+ gid_t *gidlist;
46859+ int curgid;
46860+ int realok = 0;
46861+ int effectiveok = 0;
46862+ int fsok = 0;
46863+
46864+ if (unlikely(!(gr_status & GR_READY)))
46865+ return 0;
46866+
46867+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46868+ gr_log_learn_id_change('g', real, effective, fs);
46869+
46870+ num = current->acl->group_trans_num;
46871+ gidlist = current->acl->group_transitions;
46872+
46873+ if (gidlist == NULL)
46874+ return 0;
46875+
46876+ if (real == -1)
46877+ realok = 1;
46878+ if (effective == -1)
46879+ effectiveok = 1;
46880+ if (fs == -1)
46881+ fsok = 1;
46882+
46883+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
46884+ for (i = 0; i < num; i++) {
46885+ curgid = (int)gidlist[i];
46886+ if (real == curgid)
46887+ realok = 1;
46888+ if (effective == curgid)
46889+ effectiveok = 1;
46890+ if (fs == curgid)
46891+ fsok = 1;
46892+ }
46893+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
46894+ for (i = 0; i < num; i++) {
46895+ curgid = (int)gidlist[i];
46896+ if (real == curgid)
46897+ break;
46898+ if (effective == curgid)
46899+ break;
46900+ if (fs == curgid)
46901+ break;
46902+ }
46903+ /* not in deny list */
46904+ if (i == num) {
46905+ realok = 1;
46906+ effectiveok = 1;
46907+ fsok = 1;
46908+ }
46909+ }
46910+
46911+ if (realok && effectiveok && fsok)
46912+ return 0;
46913+ else {
46914+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46915+ return 1;
46916+ }
46917+}
46918+
46919+void
46920+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46921+{
46922+ struct acl_role_label *role = task->role;
46923+ struct acl_subject_label *subj = NULL;
46924+ struct acl_object_label *obj;
46925+ struct file *filp;
46926+
46927+ if (unlikely(!(gr_status & GR_READY)))
46928+ return;
46929+
46930+ filp = task->exec_file;
46931+
46932+ /* kernel process, we'll give them the kernel role */
46933+ if (unlikely(!filp)) {
46934+ task->role = kernel_role;
46935+ task->acl = kernel_role->root_label;
46936+ return;
46937+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46938+ role = lookup_acl_role_label(task, uid, gid);
46939+
46940+	/* perform the subject lookup in the possibly new role;
46941+	   we can reuse this result below in the case where role == task->role
46942+	 */
46943+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46944+
46945+	/* if we changed uid/gid but ended up in the same role
46946+	   and are using inheritance, don't lose the inherited subject:
46947+	   if the current subject differs from what a normal lookup
46948+	   would produce, we arrived at it via inheritance, so don't
46949+	   lose that subject
46950+	*/
46951+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46952+ (subj == task->acl)))
46953+ task->acl = subj;
46954+
46955+ task->role = role;
46956+
46957+ task->is_writable = 0;
46958+
46959+ /* ignore additional mmap checks for processes that are writable
46960+ by the default ACL */
46961+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46962+ if (unlikely(obj->mode & GR_WRITE))
46963+ task->is_writable = 1;
46964+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46965+ if (unlikely(obj->mode & GR_WRITE))
46966+ task->is_writable = 1;
46967+
46968+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46969+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46970+#endif
46971+
46972+ gr_set_proc_res(task);
46973+
46974+ return;
46975+}
46976+
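+/* called when a task executes a new image: refuses the exec (-EACCES) when a ptraced
+   task, or one flagged via unsafe_share, would transition to a different subject it is
+   not allowed to reach, and otherwise applies either the nested subject attached to the
+   object or the freshly looked-up subject, then refreshes the subject's resource limits */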
46977+int
46978+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46979+ const int unsafe_share)
46980+{
46981+ struct task_struct *task = current;
46982+ struct acl_subject_label *newacl;
46983+ struct acl_object_label *obj;
46984+ __u32 retmode;
46985+
46986+ if (unlikely(!(gr_status & GR_READY)))
46987+ return 0;
46988+
46989+ newacl = chk_subj_label(dentry, mnt, task->role);
46990+
46991+ task_lock(task);
46992+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46993+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46994+ !(task->role->roletype & GR_ROLE_GOD) &&
46995+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46996+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46997+ task_unlock(task);
46998+ if (unsafe_share)
46999+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47000+ else
47001+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47002+ return -EACCES;
47003+ }
47004+ task_unlock(task);
47005+
47006+ obj = chk_obj_label(dentry, mnt, task->acl);
47007+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47008+
47009+ if (!(task->acl->mode & GR_INHERITLEARN) &&
47010+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47011+ if (obj->nested)
47012+ task->acl = obj->nested;
47013+ else
47014+ task->acl = newacl;
47015+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47016+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47017+
47018+ task->is_writable = 0;
47019+
47020+ /* ignore additional mmap checks for processes that are writable
47021+ by the default ACL */
47022+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
47023+ if (unlikely(obj->mode & GR_WRITE))
47024+ task->is_writable = 1;
47025+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
47026+ if (unlikely(obj->mode & GR_WRITE))
47027+ task->is_writable = 1;
47028+
47029+ gr_set_proc_res(task);
47030+
47031+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47032+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47033+#endif
47034+ return 0;
47035+}
47036+
47037+/* always called with valid inodev ptr */
47038+static void
47039+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47040+{
47041+ struct acl_object_label *matchpo;
47042+ struct acl_subject_label *matchps;
47043+ struct acl_subject_label *subj;
47044+ struct acl_role_label *role;
47045+ unsigned int x;
47046+
47047+ FOR_EACH_ROLE_START(role)
47048+ FOR_EACH_SUBJECT_START(role, subj, x)
47049+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47050+ matchpo->mode |= GR_DELETED;
47051+ FOR_EACH_SUBJECT_END(subj,x)
47052+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47053+ if (subj->inode == ino && subj->device == dev)
47054+ subj->mode |= GR_DELETED;
47055+ FOR_EACH_NESTED_SUBJECT_END(subj)
47056+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47057+ matchps->mode |= GR_DELETED;
47058+ FOR_EACH_ROLE_END(role)
47059+
47060+ inodev->nentry->deleted = 1;
47061+
47062+ return;
47063+}
47064+
47065+void
47066+gr_handle_delete(const ino_t ino, const dev_t dev)
47067+{
47068+ struct inodev_entry *inodev;
47069+
47070+ if (unlikely(!(gr_status & GR_READY)))
47071+ return;
47072+
47073+ write_lock(&gr_inode_lock);
47074+ inodev = lookup_inodev_entry(ino, dev);
47075+ if (inodev != NULL)
47076+ do_handle_delete(inodev, ino, dev);
47077+ write_unlock(&gr_inode_lock);
47078+
47079+ return;
47080+}
47081+
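+/* the three update_* helpers below are used by do_handle_create(): when a policy-covered
+   path reappears, the hash entry previously flagged as deleted is unlinked from its old
+   bucket, retargeted at the new inode/device pair, has its deleted flag cleared, and is
+   rehashed into the correct bucket */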
47082+static void
47083+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
47084+ const ino_t newinode, const dev_t newdevice,
47085+ struct acl_subject_label *subj)
47086+{
47087+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
47088+ struct acl_object_label *match;
47089+
47090+ match = subj->obj_hash[index];
47091+
47092+ while (match && (match->inode != oldinode ||
47093+ match->device != olddevice ||
47094+ !(match->mode & GR_DELETED)))
47095+ match = match->next;
47096+
47097+ if (match && (match->inode == oldinode)
47098+ && (match->device == olddevice)
47099+ && (match->mode & GR_DELETED)) {
47100+ if (match->prev == NULL) {
47101+ subj->obj_hash[index] = match->next;
47102+ if (match->next != NULL)
47103+ match->next->prev = NULL;
47104+ } else {
47105+ match->prev->next = match->next;
47106+ if (match->next != NULL)
47107+ match->next->prev = match->prev;
47108+ }
47109+ match->prev = NULL;
47110+ match->next = NULL;
47111+ match->inode = newinode;
47112+ match->device = newdevice;
47113+ match->mode &= ~GR_DELETED;
47114+
47115+ insert_acl_obj_label(match, subj);
47116+ }
47117+
47118+ return;
47119+}
47120+
47121+static void
47122+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
47123+ const ino_t newinode, const dev_t newdevice,
47124+ struct acl_role_label *role)
47125+{
47126+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
47127+ struct acl_subject_label *match;
47128+
47129+ match = role->subj_hash[index];
47130+
47131+ while (match && (match->inode != oldinode ||
47132+ match->device != olddevice ||
47133+ !(match->mode & GR_DELETED)))
47134+ match = match->next;
47135+
47136+ if (match && (match->inode == oldinode)
47137+ && (match->device == olddevice)
47138+ && (match->mode & GR_DELETED)) {
47139+ if (match->prev == NULL) {
47140+ role->subj_hash[index] = match->next;
47141+ if (match->next != NULL)
47142+ match->next->prev = NULL;
47143+ } else {
47144+ match->prev->next = match->next;
47145+ if (match->next != NULL)
47146+ match->next->prev = match->prev;
47147+ }
47148+ match->prev = NULL;
47149+ match->next = NULL;
47150+ match->inode = newinode;
47151+ match->device = newdevice;
47152+ match->mode &= ~GR_DELETED;
47153+
47154+ insert_acl_subj_label(match, role);
47155+ }
47156+
47157+ return;
47158+}
47159+
47160+static void
47161+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
47162+ const ino_t newinode, const dev_t newdevice)
47163+{
47164+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
47165+ struct inodev_entry *match;
47166+
47167+ match = inodev_set.i_hash[index];
47168+
47169+ while (match && (match->nentry->inode != oldinode ||
47170+ match->nentry->device != olddevice || !match->nentry->deleted))
47171+ match = match->next;
47172+
47173+ if (match && (match->nentry->inode == oldinode)
47174+ && (match->nentry->device == olddevice) &&
47175+ match->nentry->deleted) {
47176+ if (match->prev == NULL) {
47177+ inodev_set.i_hash[index] = match->next;
47178+ if (match->next != NULL)
47179+ match->next->prev = NULL;
47180+ } else {
47181+ match->prev->next = match->next;
47182+ if (match->next != NULL)
47183+ match->next->prev = match->prev;
47184+ }
47185+ match->prev = NULL;
47186+ match->next = NULL;
47187+ match->nentry->inode = newinode;
47188+ match->nentry->device = newdevice;
47189+ match->nentry->deleted = 0;
47190+
47191+ insert_inodev_entry(match);
47192+ }
47193+
47194+ return;
47195+}
47196+
47197+static void
47198+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47199+ const struct vfsmount *mnt)
47200+{
47201+ struct acl_subject_label *subj;
47202+ struct acl_role_label *role;
47203+ unsigned int x;
47204+ ino_t inode = dentry->d_inode->i_ino;
47205+ dev_t dev = __get_dev(dentry);
47206+
47207+ FOR_EACH_ROLE_START(role)
47208+ update_acl_subj_label(matchn->inode, matchn->device,
47209+ inode, dev, role);
47210+
47211+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47212+ if ((subj->inode == inode) && (subj->device == dev)) {
47213+ subj->inode = inode;
47214+ subj->device = dev;
47215+ }
47216+ FOR_EACH_NESTED_SUBJECT_END(subj)
47217+ FOR_EACH_SUBJECT_START(role, subj, x)
47218+ update_acl_obj_label(matchn->inode, matchn->device,
47219+ inode, dev, subj);
47220+ FOR_EACH_SUBJECT_END(subj,x)
47221+ FOR_EACH_ROLE_END(role)
47222+
47223+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
47224+
47225+ return;
47226+}
47227+
47228+void
47229+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47230+{
47231+ struct name_entry *matchn;
47232+
47233+ if (unlikely(!(gr_status & GR_READY)))
47234+ return;
47235+
47236+ preempt_disable();
47237+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47238+
47239+ if (unlikely((unsigned long)matchn)) {
47240+ write_lock(&gr_inode_lock);
47241+ do_handle_create(matchn, dentry, mnt);
47242+ write_unlock(&gr_inode_lock);
47243+ }
47244+ preempt_enable();
47245+
47246+ return;
47247+}
47248+
47249+void
47250+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47251+ struct dentry *old_dentry,
47252+ struct dentry *new_dentry,
47253+ struct vfsmount *mnt, const __u8 replace)
47254+{
47255+ struct name_entry *matchn;
47256+ struct inodev_entry *inodev;
47257+ ino_t oldinode = old_dentry->d_inode->i_ino;
47258+ dev_t olddev = __get_dev(old_dentry);
47259+
47260+	/* vfs_rename swaps the name and parent link for old_dentry and
47261+	   new_dentry.
47262+	   At this point, old_dentry has the new name, parent link, and inode
47263+	   for the renamed file.
47264+	   If a file is being replaced by a rename, new_dentry has the inode
47265+	   and name for the replaced file.
47266+	*/
47267+
47268+ if (unlikely(!(gr_status & GR_READY)))
47269+ return;
47270+
47271+ preempt_disable();
47272+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47273+
47274+ /* we wouldn't have to check d_inode if it weren't for
47275+ NFS silly-renaming
47276+ */
47277+
47278+ write_lock(&gr_inode_lock);
47279+ if (unlikely(replace && new_dentry->d_inode)) {
47280+ ino_t newinode = new_dentry->d_inode->i_ino;
47281+ dev_t newdev = __get_dev(new_dentry);
47282+ inodev = lookup_inodev_entry(newinode, newdev);
47283+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47284+ do_handle_delete(inodev, newinode, newdev);
47285+ }
47286+
47287+ inodev = lookup_inodev_entry(oldinode, olddev);
47288+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47289+ do_handle_delete(inodev, oldinode, olddev);
47290+
47291+ if (unlikely((unsigned long)matchn))
47292+ do_handle_create(matchn, old_dentry, mnt);
47293+
47294+ write_unlock(&gr_inode_lock);
47295+ preempt_enable();
47296+
47297+ return;
47298+}
47299+
47300+static int
47301+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47302+ unsigned char **sum)
47303+{
47304+ struct acl_role_label *r;
47305+ struct role_allowed_ip *ipp;
47306+ struct role_transition *trans;
47307+ unsigned int i;
47308+ int found = 0;
47309+ u32 curr_ip = current->signal->curr_ip;
47310+
47311+ current->signal->saved_ip = curr_ip;
47312+
47313+ /* check transition table */
47314+
47315+ for (trans = current->role->transitions; trans; trans = trans->next) {
47316+ if (!strcmp(rolename, trans->rolename)) {
47317+ found = 1;
47318+ break;
47319+ }
47320+ }
47321+
47322+ if (!found)
47323+ return 0;
47324+
47325+ /* handle special roles that do not require authentication
47326+ and check ip */
47327+
47328+ FOR_EACH_ROLE_START(r)
47329+ if (!strcmp(rolename, r->rolename) &&
47330+ (r->roletype & GR_ROLE_SPECIAL)) {
47331+ found = 0;
47332+ if (r->allowed_ips != NULL) {
47333+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47334+ if ((ntohl(curr_ip) & ipp->netmask) ==
47335+ (ntohl(ipp->addr) & ipp->netmask))
47336+ found = 1;
47337+ }
47338+ } else
47339+ found = 2;
47340+ if (!found)
47341+ return 0;
47342+
47343+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47344+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47345+ *salt = NULL;
47346+ *sum = NULL;
47347+ return 1;
47348+ }
47349+ }
47350+ FOR_EACH_ROLE_END(r)
47351+
47352+ for (i = 0; i < num_sprole_pws; i++) {
47353+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47354+ *salt = acl_special_roles[i]->salt;
47355+ *sum = acl_special_roles[i]->sum;
47356+ return 1;
47357+ }
47358+ }
47359+
47360+ return 0;
47361+}
47362+
47363+static void
47364+assign_special_role(char *rolename)
47365+{
47366+ struct acl_object_label *obj;
47367+ struct acl_role_label *r;
47368+ struct acl_role_label *assigned = NULL;
47369+ struct task_struct *tsk;
47370+ struct file *filp;
47371+
47372+ FOR_EACH_ROLE_START(r)
47373+ if (!strcmp(rolename, r->rolename) &&
47374+ (r->roletype & GR_ROLE_SPECIAL)) {
47375+ assigned = r;
47376+ break;
47377+ }
47378+ FOR_EACH_ROLE_END(r)
47379+
47380+ if (!assigned)
47381+ return;
47382+
47383+ read_lock(&tasklist_lock);
47384+ read_lock(&grsec_exec_file_lock);
47385+
47386+ tsk = current->real_parent;
47387+ if (tsk == NULL)
47388+ goto out_unlock;
47389+
47390+ filp = tsk->exec_file;
47391+ if (filp == NULL)
47392+ goto out_unlock;
47393+
47394+ tsk->is_writable = 0;
47395+
47396+ tsk->acl_sp_role = 1;
47397+ tsk->acl_role_id = ++acl_sp_role_value;
47398+ tsk->role = assigned;
47399+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47400+
47401+ /* ignore additional mmap checks for processes that are writable
47402+ by the default ACL */
47403+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47404+ if (unlikely(obj->mode & GR_WRITE))
47405+ tsk->is_writable = 1;
47406+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47407+ if (unlikely(obj->mode & GR_WRITE))
47408+ tsk->is_writable = 1;
47409+
47410+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47411+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47412+#endif
47413+
47414+out_unlock:
47415+ read_unlock(&grsec_exec_file_lock);
47416+ read_unlock(&tasklist_lock);
47417+ return;
47418+}
47419+
47420+int gr_check_secure_terminal(struct task_struct *task)
47421+{
47422+ struct task_struct *p, *p2, *p3;
47423+ struct files_struct *files;
47424+ struct fdtable *fdt;
47425+ struct file *our_file = NULL, *file;
47426+ int i;
47427+
47428+ if (task->signal->tty == NULL)
47429+ return 1;
47430+
47431+ files = get_files_struct(task);
47432+ if (files != NULL) {
47433+ rcu_read_lock();
47434+ fdt = files_fdtable(files);
47435+ for (i=0; i < fdt->max_fds; i++) {
47436+ file = fcheck_files(files, i);
47437+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47438+ get_file(file);
47439+ our_file = file;
47440+ }
47441+ }
47442+ rcu_read_unlock();
47443+ put_files_struct(files);
47444+ }
47445+
47446+ if (our_file == NULL)
47447+ return 1;
47448+
47449+ read_lock(&tasklist_lock);
47450+ do_each_thread(p2, p) {
47451+ files = get_files_struct(p);
47452+ if (files == NULL ||
47453+ (p->signal && p->signal->tty == task->signal->tty)) {
47454+ if (files != NULL)
47455+ put_files_struct(files);
47456+ continue;
47457+ }
47458+ rcu_read_lock();
47459+ fdt = files_fdtable(files);
47460+ for (i=0; i < fdt->max_fds; i++) {
47461+ file = fcheck_files(files, i);
47462+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47463+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47464+ p3 = task;
47465+ while (p3->pid > 0) {
47466+ if (p3 == p)
47467+ break;
47468+ p3 = p3->real_parent;
47469+ }
47470+ if (p3 == p)
47471+ break;
47472+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47473+ gr_handle_alertkill(p);
47474+ rcu_read_unlock();
47475+ put_files_struct(files);
47476+ read_unlock(&tasklist_lock);
47477+ fput(our_file);
47478+ return 0;
47479+ }
47480+ }
47481+ rcu_read_unlock();
47482+ put_files_struct(files);
47483+ } while_each_thread(p2, p);
47484+ read_unlock(&tasklist_lock);
47485+
47486+ fput(our_file);
47487+ return 1;
47488+}
47489+
47490+ssize_t
47491+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47492+{
47493+ struct gr_arg_wrapper uwrap;
47494+ unsigned char *sprole_salt = NULL;
47495+ unsigned char *sprole_sum = NULL;
47496+ int error = sizeof (struct gr_arg_wrapper);
47497+ int error2 = 0;
47498+
47499+ mutex_lock(&gr_dev_mutex);
47500+
47501+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47502+ error = -EPERM;
47503+ goto out;
47504+ }
47505+
47506+ if (count != sizeof (struct gr_arg_wrapper)) {
47507+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47508+ error = -EINVAL;
47509+ goto out;
47510+ }
47511+
47512+
47513+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47514+ gr_auth_expires = 0;
47515+ gr_auth_attempts = 0;
47516+ }
47517+
47518+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47519+ error = -EFAULT;
47520+ goto out;
47521+ }
47522+
47523+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47524+ error = -EINVAL;
47525+ goto out;
47526+ }
47527+
47528+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47529+ error = -EFAULT;
47530+ goto out;
47531+ }
47532+
47533+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47534+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47535+ time_after(gr_auth_expires, get_seconds())) {
47536+ error = -EBUSY;
47537+ goto out;
47538+ }
47539+
47540+	/* if a non-root user is trying to do anything other than use a
47541+	   special role, do not attempt authentication and do not count it
47542+	   towards authentication locking
47543+	*/
47544+
47545+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47546+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47547+ current_uid()) {
47548+ error = -EPERM;
47549+ goto out;
47550+ }
47551+
47552+ /* ensure pw and special role name are null terminated */
47553+
47554+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47555+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47556+
47557+	/* Okay.
47558+	 * We have enough of the argument structure (we have yet
47559+	 * to copy_from_user the tables themselves).  Copy the tables
47560+	 * only if we need them, i.e. for loading operations. */
47561+
47562+ switch (gr_usermode->mode) {
47563+ case GR_STATUS:
47564+ if (gr_status & GR_READY) {
47565+ error = 1;
47566+ if (!gr_check_secure_terminal(current))
47567+ error = 3;
47568+ } else
47569+ error = 2;
47570+ goto out;
47571+ case GR_SHUTDOWN:
47572+ if ((gr_status & GR_READY)
47573+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47574+ pax_open_kernel();
47575+ gr_status &= ~GR_READY;
47576+ pax_close_kernel();
47577+
47578+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47579+ free_variables();
47580+ memset(gr_usermode, 0, sizeof (struct gr_arg));
47581+ memset(gr_system_salt, 0, GR_SALT_LEN);
47582+ memset(gr_system_sum, 0, GR_SHA_LEN);
47583+ } else if (gr_status & GR_READY) {
47584+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47585+ error = -EPERM;
47586+ } else {
47587+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47588+ error = -EAGAIN;
47589+ }
47590+ break;
47591+ case GR_ENABLE:
47592+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47593+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47594+ else {
47595+ if (gr_status & GR_READY)
47596+ error = -EAGAIN;
47597+ else
47598+ error = error2;
47599+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47600+ }
47601+ break;
47602+ case GR_RELOAD:
47603+ if (!(gr_status & GR_READY)) {
47604+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47605+ error = -EAGAIN;
47606+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47607+ lock_kernel();
47608+
47609+ pax_open_kernel();
47610+ gr_status &= ~GR_READY;
47611+ pax_close_kernel();
47612+
47613+ free_variables();
47614+ if (!(error2 = gracl_init(gr_usermode))) {
47615+ unlock_kernel();
47616+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47617+ } else {
47618+ unlock_kernel();
47619+ error = error2;
47620+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47621+ }
47622+ } else {
47623+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47624+ error = -EPERM;
47625+ }
47626+ break;
47627+ case GR_SEGVMOD:
47628+ if (unlikely(!(gr_status & GR_READY))) {
47629+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47630+ error = -EAGAIN;
47631+ break;
47632+ }
47633+
47634+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47635+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47636+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47637+ struct acl_subject_label *segvacl;
47638+ segvacl =
47639+ lookup_acl_subj_label(gr_usermode->segv_inode,
47640+ gr_usermode->segv_device,
47641+ current->role);
47642+ if (segvacl) {
47643+ segvacl->crashes = 0;
47644+ segvacl->expires = 0;
47645+ }
47646+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47647+ gr_remove_uid(gr_usermode->segv_uid);
47648+ }
47649+ } else {
47650+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47651+ error = -EPERM;
47652+ }
47653+ break;
47654+ case GR_SPROLE:
47655+ case GR_SPROLEPAM:
47656+ if (unlikely(!(gr_status & GR_READY))) {
47657+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47658+ error = -EAGAIN;
47659+ break;
47660+ }
47661+
47662+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47663+ current->role->expires = 0;
47664+ current->role->auth_attempts = 0;
47665+ }
47666+
47667+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47668+ time_after(current->role->expires, get_seconds())) {
47669+ error = -EBUSY;
47670+ goto out;
47671+ }
47672+
47673+ if (lookup_special_role_auth
47674+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47675+ && ((!sprole_salt && !sprole_sum)
47676+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47677+ char *p = "";
47678+ assign_special_role(gr_usermode->sp_role);
47679+ read_lock(&tasklist_lock);
47680+ if (current->real_parent)
47681+ p = current->real_parent->role->rolename;
47682+ read_unlock(&tasklist_lock);
47683+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47684+ p, acl_sp_role_value);
47685+ } else {
47686+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47687+ error = -EPERM;
47688+ if(!(current->role->auth_attempts++))
47689+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47690+
47691+ goto out;
47692+ }
47693+ break;
47694+ case GR_UNSPROLE:
47695+ if (unlikely(!(gr_status & GR_READY))) {
47696+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47697+ error = -EAGAIN;
47698+ break;
47699+ }
47700+
47701+ if (current->role->roletype & GR_ROLE_SPECIAL) {
47702+ char *p = "";
47703+ int i = 0;
47704+
47705+ read_lock(&tasklist_lock);
47706+ if (current->real_parent) {
47707+ p = current->real_parent->role->rolename;
47708+ i = current->real_parent->acl_role_id;
47709+ }
47710+ read_unlock(&tasklist_lock);
47711+
47712+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47713+ gr_set_acls(1);
47714+ } else {
47715+ error = -EPERM;
47716+ goto out;
47717+ }
47718+ break;
47719+ default:
47720+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47721+ error = -EINVAL;
47722+ break;
47723+ }
47724+
47725+ if (error != -EPERM)
47726+ goto out;
47727+
47728+ if(!(gr_auth_attempts++))
47729+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47730+
47731+ out:
47732+ mutex_unlock(&gr_dev_mutex);
47733+ return error;
47734+}
47735+
47736+/* must be called with
47737+ rcu_read_lock();
47738+ read_lock(&tasklist_lock);
47739+ read_lock(&grsec_exec_file_lock);
47740+*/
47741+int gr_apply_subject_to_task(struct task_struct *task)
47742+{
47743+ struct acl_object_label *obj;
47744+ char *tmpname;
47745+ struct acl_subject_label *tmpsubj;
47746+ struct file *filp;
47747+ struct name_entry *nmatch;
47748+
47749+ filp = task->exec_file;
47750+ if (filp == NULL)
47751+ return 0;
47752+
47753+	/* the following applies the correct subject
47754+	   to binaries running when the RBAC system
47755+	   is enabled, in case those binaries have been
47756+	   replaced or deleted since their execution
47757+ -----
47758+ when the RBAC system starts, the inode/dev
47759+ from exec_file will be one the RBAC system
47760+ is unaware of. It only knows the inode/dev
47761+ of the present file on disk, or the absence
47762+ of it.
47763+ */
47764+ preempt_disable();
47765+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47766+
47767+ nmatch = lookup_name_entry(tmpname);
47768+ preempt_enable();
47769+ tmpsubj = NULL;
47770+ if (nmatch) {
47771+ if (nmatch->deleted)
47772+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47773+ else
47774+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47775+ if (tmpsubj != NULL)
47776+ task->acl = tmpsubj;
47777+ }
47778+ if (tmpsubj == NULL)
47779+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47780+ task->role);
47781+ if (task->acl) {
47782+ struct acl_subject_label *curr;
47783+ curr = task->acl;
47784+
47785+ task->is_writable = 0;
47786+ /* ignore additional mmap checks for processes that are writable
47787+ by the default ACL */
47788+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47789+ if (unlikely(obj->mode & GR_WRITE))
47790+ task->is_writable = 1;
47791+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47792+ if (unlikely(obj->mode & GR_WRITE))
47793+ task->is_writable = 1;
47794+
47795+ gr_set_proc_res(task);
47796+
47797+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47798+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47799+#endif
47800+ } else {
47801+ return 1;
47802+ }
47803+
47804+ return 0;
47805+}
47806+
47807+int
47808+gr_set_acls(const int type)
47809+{
47810+ struct task_struct *task, *task2;
47811+ struct acl_role_label *role = current->role;
47812+ __u16 acl_role_id = current->acl_role_id;
47813+ const struct cred *cred;
47814+ int ret;
47815+
47816+ rcu_read_lock();
47817+ read_lock(&tasklist_lock);
47818+ read_lock(&grsec_exec_file_lock);
47819+ do_each_thread(task2, task) {
47820+ /* check to see if we're called from the exit handler,
47821+ if so, only replace ACLs that have inherited the admin
47822+ ACL */
47823+
47824+ if (type && (task->role != role ||
47825+ task->acl_role_id != acl_role_id))
47826+ continue;
47827+
47828+ task->acl_role_id = 0;
47829+ task->acl_sp_role = 0;
47830+
47831+ if (task->exec_file) {
47832+ cred = __task_cred(task);
47833+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47834+
47835+ ret = gr_apply_subject_to_task(task);
47836+ if (ret) {
47837+ read_unlock(&grsec_exec_file_lock);
47838+ read_unlock(&tasklist_lock);
47839+ rcu_read_unlock();
47840+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47841+ return ret;
47842+ }
47843+ } else {
47844+ // it's a kernel process
47845+ task->role = kernel_role;
47846+ task->acl = kernel_role->root_label;
47847+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47848+ task->acl->mode &= ~GR_PROCFIND;
47849+#endif
47850+ }
47851+ } while_each_thread(task2, task);
47852+ read_unlock(&grsec_exec_file_lock);
47853+ read_unlock(&tasklist_lock);
47854+ rcu_read_unlock();
47855+
47856+ return 0;
47857+}
47858+
47859+void
47860+gr_learn_resource(const struct task_struct *task,
47861+ const int res, const unsigned long wanted, const int gt)
47862+{
47863+ struct acl_subject_label *acl;
47864+ const struct cred *cred;
47865+
47866+ if (unlikely((gr_status & GR_READY) &&
47867+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47868+ goto skip_reslog;
47869+
47870+#ifdef CONFIG_GRKERNSEC_RESLOG
47871+ gr_log_resource(task, res, wanted, gt);
47872+#endif
47873+ skip_reslog:
47874+
47875+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47876+ return;
47877+
47878+ acl = task->acl;
47879+
47880+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47881+ !(acl->resmask & (1 << (unsigned short) res))))
47882+ return;
47883+
47884+ if (wanted >= acl->res[res].rlim_cur) {
47885+ unsigned long res_add;
47886+
47887+ res_add = wanted;
47888+ switch (res) {
47889+ case RLIMIT_CPU:
47890+ res_add += GR_RLIM_CPU_BUMP;
47891+ break;
47892+ case RLIMIT_FSIZE:
47893+ res_add += GR_RLIM_FSIZE_BUMP;
47894+ break;
47895+ case RLIMIT_DATA:
47896+ res_add += GR_RLIM_DATA_BUMP;
47897+ break;
47898+ case RLIMIT_STACK:
47899+ res_add += GR_RLIM_STACK_BUMP;
47900+ break;
47901+ case RLIMIT_CORE:
47902+ res_add += GR_RLIM_CORE_BUMP;
47903+ break;
47904+ case RLIMIT_RSS:
47905+ res_add += GR_RLIM_RSS_BUMP;
47906+ break;
47907+ case RLIMIT_NPROC:
47908+ res_add += GR_RLIM_NPROC_BUMP;
47909+ break;
47910+ case RLIMIT_NOFILE:
47911+ res_add += GR_RLIM_NOFILE_BUMP;
47912+ break;
47913+ case RLIMIT_MEMLOCK:
47914+ res_add += GR_RLIM_MEMLOCK_BUMP;
47915+ break;
47916+ case RLIMIT_AS:
47917+ res_add += GR_RLIM_AS_BUMP;
47918+ break;
47919+ case RLIMIT_LOCKS:
47920+ res_add += GR_RLIM_LOCKS_BUMP;
47921+ break;
47922+ case RLIMIT_SIGPENDING:
47923+ res_add += GR_RLIM_SIGPENDING_BUMP;
47924+ break;
47925+ case RLIMIT_MSGQUEUE:
47926+ res_add += GR_RLIM_MSGQUEUE_BUMP;
47927+ break;
47928+ case RLIMIT_NICE:
47929+ res_add += GR_RLIM_NICE_BUMP;
47930+ break;
47931+ case RLIMIT_RTPRIO:
47932+ res_add += GR_RLIM_RTPRIO_BUMP;
47933+ break;
47934+ case RLIMIT_RTTIME:
47935+ res_add += GR_RLIM_RTTIME_BUMP;
47936+ break;
47937+ }
47938+
47939+ acl->res[res].rlim_cur = res_add;
47940+
47941+ if (wanted > acl->res[res].rlim_max)
47942+ acl->res[res].rlim_max = res_add;
47943+
47944+ /* only log the subject filename, since resource logging is supported for
47945+ single-subject learning only */
47946+ rcu_read_lock();
47947+ cred = __task_cred(task);
47948+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47949+ task->role->roletype, cred->uid, cred->gid, acl->filename,
47950+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47951+ "", (unsigned long) res, &task->signal->saved_ip);
47952+ rcu_read_unlock();
47953+ }
47954+
47955+ return;
47956+}
47957+
47958+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47959+void
47960+pax_set_initial_flags(struct linux_binprm *bprm)
47961+{
47962+ struct task_struct *task = current;
47963+ struct acl_subject_label *proc;
47964+ unsigned long flags;
47965+
47966+ if (unlikely(!(gr_status & GR_READY)))
47967+ return;
47968+
47969+ flags = pax_get_flags(task);
47970+
47971+ proc = task->acl;
47972+
47973+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47974+ flags &= ~MF_PAX_PAGEEXEC;
47975+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47976+ flags &= ~MF_PAX_SEGMEXEC;
47977+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47978+ flags &= ~MF_PAX_RANDMMAP;
47979+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47980+ flags &= ~MF_PAX_EMUTRAMP;
47981+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47982+ flags &= ~MF_PAX_MPROTECT;
47983+
47984+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47985+ flags |= MF_PAX_PAGEEXEC;
47986+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47987+ flags |= MF_PAX_SEGMEXEC;
47988+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47989+ flags |= MF_PAX_RANDMMAP;
47990+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47991+ flags |= MF_PAX_EMUTRAMP;
47992+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47993+ flags |= MF_PAX_MPROTECT;
47994+
47995+ pax_set_flags(task, flags);
47996+
47997+ return;
47998+}
47999+#endif
48000+
48001+#ifdef CONFIG_SYSCTL
48002+/* Eric Biederman likes breaking userland ABI and every inode-based security
48003+ system to save 35kb of memory */
48004+
48005+/* we modify the passed in filename, but adjust it back before returning */
48006+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48007+{
48008+ struct name_entry *nmatch;
48009+ char *p, *lastp = NULL;
48010+ struct acl_object_label *obj = NULL, *tmp;
48011+ struct acl_subject_label *tmpsubj;
48012+ char c = '\0';
48013+
48014+ read_lock(&gr_inode_lock);
48015+
48016+ p = name + len - 1;
48017+ do {
48018+ nmatch = lookup_name_entry(name);
48019+ if (lastp != NULL)
48020+ *lastp = c;
48021+
48022+ if (nmatch == NULL)
48023+ goto next_component;
48024+ tmpsubj = current->acl;
48025+ do {
48026+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48027+ if (obj != NULL) {
48028+ tmp = obj->globbed;
48029+ while (tmp) {
48030+ if (!glob_match(tmp->filename, name)) {
48031+ obj = tmp;
48032+ goto found_obj;
48033+ }
48034+ tmp = tmp->next;
48035+ }
48036+ goto found_obj;
48037+ }
48038+ } while ((tmpsubj = tmpsubj->parent_subject));
48039+next_component:
48040+ /* end case */
48041+ if (p == name)
48042+ break;
48043+
48044+ while (*p != '/')
48045+ p--;
48046+ if (p == name)
48047+ lastp = p + 1;
48048+ else {
48049+ lastp = p;
48050+ p--;
48051+ }
48052+ c = *lastp;
48053+ *lastp = '\0';
48054+ } while (1);
48055+found_obj:
48056+ read_unlock(&gr_inode_lock);
48057+ /* obj returned will always be non-null */
48058+ return obj;
48059+}
48060+
48061+/* returns 0 when allowing, non-zero on error
48062+ op of 0 is used for readdir, so we don't log the names of hidden files
48063+*/
48064+__u32
48065+gr_handle_sysctl(const struct ctl_table *table, const int op)
48066+{
48067+ ctl_table *tmp;
48068+ const char *proc_sys = "/proc/sys";
48069+ char *path;
48070+ struct acl_object_label *obj;
48071+ unsigned short len = 0, pos = 0, depth = 0, i;
48072+ __u32 err = 0;
48073+ __u32 mode = 0;
48074+
48075+ if (unlikely(!(gr_status & GR_READY)))
48076+ return 0;
48077+
48078+ /* for now, ignore operations on non-sysctl entries if it's not a
48079+	   readdir */
48080+ if (table->child != NULL && op != 0)
48081+ return 0;
48082+
48083+ mode |= GR_FIND;
48084+ /* it's only a read if it's an entry, read on dirs is for readdir */
48085+ if (op & MAY_READ)
48086+ mode |= GR_READ;
48087+ if (op & MAY_WRITE)
48088+ mode |= GR_WRITE;
48089+
48090+ preempt_disable();
48091+
48092+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48093+
48094+ /* it's only a read/write if it's an actual entry, not a dir
48095+	   (dirs are opened for readdir)
48096+ */
48097+
48098+ /* convert the requested sysctl entry into a pathname */
48099+
48100+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48101+ len += strlen(tmp->procname);
48102+ len++;
48103+ depth++;
48104+ }
48105+
48106+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
48107+ /* deny */
48108+ goto out;
48109+ }
48110+
48111+ memset(path, 0, PAGE_SIZE);
48112+
48113+ memcpy(path, proc_sys, strlen(proc_sys));
48114+
48115+ pos += strlen(proc_sys);
48116+
48117+ for (; depth > 0; depth--) {
48118+ path[pos] = '/';
48119+ pos++;
48120+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48121+ if (depth == i) {
48122+ memcpy(path + pos, tmp->procname,
48123+ strlen(tmp->procname));
48124+ pos += strlen(tmp->procname);
48125+ }
48126+ i++;
48127+ }
48128+ }
48129+
48130+ obj = gr_lookup_by_name(path, pos);
48131+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
48132+
48133+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
48134+ ((err & mode) != mode))) {
48135+ __u32 new_mode = mode;
48136+
48137+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48138+
48139+ err = 0;
48140+ gr_log_learn_sysctl(path, new_mode);
48141+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
48142+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
48143+ err = -ENOENT;
48144+ } else if (!(err & GR_FIND)) {
48145+ err = -ENOENT;
48146+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
48147+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
48148+ path, (mode & GR_READ) ? " reading" : "",
48149+ (mode & GR_WRITE) ? " writing" : "");
48150+ err = -EACCES;
48151+ } else if ((err & mode) != mode) {
48152+ err = -EACCES;
48153+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
48154+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
48155+ path, (mode & GR_READ) ? " reading" : "",
48156+ (mode & GR_WRITE) ? " writing" : "");
48157+ err = 0;
48158+ } else
48159+ err = 0;
48160+
48161+ out:
48162+ preempt_enable();
48163+
48164+ return err;
48165+}
48166+#endif
48167+
48168+int
48169+gr_handle_proc_ptrace(struct task_struct *task)
48170+{
48171+ struct file *filp;
48172+ struct task_struct *tmp = task;
48173+ struct task_struct *curtemp = current;
48174+ __u32 retmode;
48175+
48176+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48177+ if (unlikely(!(gr_status & GR_READY)))
48178+ return 0;
48179+#endif
48180+
48181+ read_lock(&tasklist_lock);
48182+ read_lock(&grsec_exec_file_lock);
48183+ filp = task->exec_file;
48184+
48185+ while (tmp->pid > 0) {
48186+ if (tmp == curtemp)
48187+ break;
48188+ tmp = tmp->real_parent;
48189+ }
48190+
48191+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48192+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48193+ read_unlock(&grsec_exec_file_lock);
48194+ read_unlock(&tasklist_lock);
48195+ return 1;
48196+ }
48197+
48198+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48199+ if (!(gr_status & GR_READY)) {
48200+ read_unlock(&grsec_exec_file_lock);
48201+ read_unlock(&tasklist_lock);
48202+ return 0;
48203+ }
48204+#endif
48205+
48206+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48207+ read_unlock(&grsec_exec_file_lock);
48208+ read_unlock(&tasklist_lock);
48209+
48210+ if (retmode & GR_NOPTRACE)
48211+ return 1;
48212+
48213+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48214+ && (current->acl != task->acl || (current->acl != current->role->root_label
48215+ && current->pid != task->pid)))
48216+ return 1;
48217+
48218+ return 0;
48219+}
48220+
48221+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48222+{
48223+ if (unlikely(!(gr_status & GR_READY)))
48224+ return;
48225+
48226+ if (!(current->role->roletype & GR_ROLE_GOD))
48227+ return;
48228+
48229+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48230+ p->role->rolename, gr_task_roletype_to_char(p),
48231+ p->acl->filename);
48232+}
48233+
48234+int
48235+gr_handle_ptrace(struct task_struct *task, const long request)
48236+{
48237+ struct task_struct *tmp = task;
48238+ struct task_struct *curtemp = current;
48239+ __u32 retmode;
48240+
48241+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48242+ if (unlikely(!(gr_status & GR_READY)))
48243+ return 0;
48244+#endif
48245+
48246+ read_lock(&tasklist_lock);
48247+ while (tmp->pid > 0) {
48248+ if (tmp == curtemp)
48249+ break;
48250+ tmp = tmp->real_parent;
48251+ }
48252+
48253+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48254+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48255+ read_unlock(&tasklist_lock);
48256+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48257+ return 1;
48258+ }
48259+ read_unlock(&tasklist_lock);
48260+
48261+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48262+ if (!(gr_status & GR_READY))
48263+ return 0;
48264+#endif
48265+
48266+ read_lock(&grsec_exec_file_lock);
48267+ if (unlikely(!task->exec_file)) {
48268+ read_unlock(&grsec_exec_file_lock);
48269+ return 0;
48270+ }
48271+
48272+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48273+ read_unlock(&grsec_exec_file_lock);
48274+
48275+ if (retmode & GR_NOPTRACE) {
48276+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48277+ return 1;
48278+ }
48279+
48280+ if (retmode & GR_PTRACERD) {
48281+ switch (request) {
48282+ case PTRACE_POKETEXT:
48283+ case PTRACE_POKEDATA:
48284+ case PTRACE_POKEUSR:
48285+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48286+ case PTRACE_SETREGS:
48287+ case PTRACE_SETFPREGS:
48288+#endif
48289+#ifdef CONFIG_X86
48290+ case PTRACE_SETFPXREGS:
48291+#endif
48292+#ifdef CONFIG_ALTIVEC
48293+ case PTRACE_SETVRREGS:
48294+#endif
48295+ return 1;
48296+ default:
48297+ return 0;
48298+ }
48299+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
48300+ !(current->role->roletype & GR_ROLE_GOD) &&
48301+ (current->acl != task->acl)) {
48302+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48303+ return 1;
48304+ }
48305+
48306+ return 0;
48307+}
48308+
48309+static int is_writable_mmap(const struct file *filp)
48310+{
48311+ struct task_struct *task = current;
48312+ struct acl_object_label *obj, *obj2;
48313+
48314+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48315+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48316+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48317+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48318+ task->role->root_label);
48319+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48320+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48321+ return 1;
48322+ }
48323+ }
48324+ return 0;
48325+}
48326+
48327+int
48328+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48329+{
48330+ __u32 mode;
48331+
48332+ if (unlikely(!file || !(prot & PROT_EXEC)))
48333+ return 1;
48334+
48335+ if (is_writable_mmap(file))
48336+ return 0;
48337+
48338+ mode =
48339+ gr_search_file(file->f_path.dentry,
48340+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48341+ file->f_path.mnt);
48342+
48343+ if (!gr_tpe_allow(file))
48344+ return 0;
48345+
48346+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48347+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48348+ return 0;
48349+ } else if (unlikely(!(mode & GR_EXEC))) {
48350+ return 0;
48351+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48352+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48353+ return 1;
48354+ }
48355+
48356+ return 1;
48357+}
48358+
48359+int
48360+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48361+{
48362+ __u32 mode;
48363+
48364+ if (unlikely(!file || !(prot & PROT_EXEC)))
48365+ return 1;
48366+
48367+ if (is_writable_mmap(file))
48368+ return 0;
48369+
48370+ mode =
48371+ gr_search_file(file->f_path.dentry,
48372+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48373+ file->f_path.mnt);
48374+
48375+ if (!gr_tpe_allow(file))
48376+ return 0;
48377+
48378+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48379+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48380+ return 0;
48381+ } else if (unlikely(!(mode & GR_EXEC))) {
48382+ return 0;
48383+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48384+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48385+ return 1;
48386+ }
48387+
48388+ return 1;
48389+}
48390+
48391+void
48392+gr_acl_handle_psacct(struct task_struct *task, const long code)
48393+{
48394+ unsigned long runtime;
48395+ unsigned long cputime;
48396+ unsigned int wday, cday;
48397+ __u8 whr, chr;
48398+ __u8 wmin, cmin;
48399+ __u8 wsec, csec;
48400+ struct timespec timeval;
48401+
48402+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48403+ !(task->acl->mode & GR_PROCACCT)))
48404+ return;
48405+
48406+ do_posix_clock_monotonic_gettime(&timeval);
48407+ runtime = timeval.tv_sec - task->start_time.tv_sec;
48408+ wday = runtime / (3600 * 24);
48409+ runtime -= wday * (3600 * 24);
48410+ whr = runtime / 3600;
48411+ runtime -= whr * 3600;
48412+ wmin = runtime / 60;
48413+ runtime -= wmin * 60;
48414+ wsec = runtime;
48415+
48416+ cputime = (task->utime + task->stime) / HZ;
48417+ cday = cputime / (3600 * 24);
48418+ cputime -= cday * (3600 * 24);
48419+ chr = cputime / 3600;
48420+ cputime -= chr * 3600;
48421+ cmin = cputime / 60;
48422+ cputime -= cmin * 60;
48423+ csec = cputime;
48424+
48425+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48426+
48427+ return;
48428+}
48429+
48430+void gr_set_kernel_label(struct task_struct *task)
48431+{
48432+ if (gr_status & GR_READY) {
48433+ task->role = kernel_role;
48434+ task->acl = kernel_role->root_label;
48435+ }
48436+ return;
48437+}
48438+
48439+#ifdef CONFIG_TASKSTATS
48440+int gr_is_taskstats_denied(int pid)
48441+{
48442+ struct task_struct *task;
48443+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48444+ const struct cred *cred;
48445+#endif
48446+ int ret = 0;
48447+
48448+ /* restrict taskstats viewing to un-chrooted root users
48449+ who have the 'view' subject flag if the RBAC system is enabled
48450+ */
48451+
48452+ rcu_read_lock();
48453+ read_lock(&tasklist_lock);
48454+ task = find_task_by_vpid(pid);
48455+ if (task) {
48456+#ifdef CONFIG_GRKERNSEC_CHROOT
48457+ if (proc_is_chrooted(task))
48458+ ret = -EACCES;
48459+#endif
48460+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48461+ cred = __task_cred(task);
48462+#ifdef CONFIG_GRKERNSEC_PROC_USER
48463+ if (cred->uid != 0)
48464+ ret = -EACCES;
48465+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48466+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48467+ ret = -EACCES;
48468+#endif
48469+#endif
48470+ if (gr_status & GR_READY) {
48471+ if (!(task->acl->mode & GR_VIEW))
48472+ ret = -EACCES;
48473+ }
48474+ } else
48475+ ret = -ENOENT;
48476+
48477+ read_unlock(&tasklist_lock);
48478+ rcu_read_unlock();
48479+
48480+ return ret;
48481+}
48482+#endif
48483+
48484+/* AUXV entries are filled via a descendant of search_binary_handler
48485+ after we've already applied the subject for the target
48486+*/
48487+int gr_acl_enable_at_secure(void)
48488+{
48489+ if (unlikely(!(gr_status & GR_READY)))
48490+ return 0;
48491+
48492+ if (current->acl->mode & GR_ATSECURE)
48493+ return 1;
48494+
48495+ return 0;
48496+}
48497+
48498+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48499+{
48500+ struct task_struct *task = current;
48501+ struct dentry *dentry = file->f_path.dentry;
48502+ struct vfsmount *mnt = file->f_path.mnt;
48503+ struct acl_object_label *obj, *tmp;
48504+ struct acl_subject_label *subj;
48505+ unsigned int bufsize;
48506+ int is_not_root;
48507+ char *path;
48508+ dev_t dev = __get_dev(dentry);
48509+
48510+ if (unlikely(!(gr_status & GR_READY)))
48511+ return 1;
48512+
48513+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48514+ return 1;
48515+
48516+ /* ignore Eric Biederman */
48517+ if (IS_PRIVATE(dentry->d_inode))
48518+ return 1;
48519+
48520+ subj = task->acl;
48521+ do {
48522+ obj = lookup_acl_obj_label(ino, dev, subj);
48523+ if (obj != NULL)
48524+ return (obj->mode & GR_FIND) ? 1 : 0;
48525+ } while ((subj = subj->parent_subject));
48526+
48527+ /* this is purely an optimization since we're looking for an object
48528+	   for the directory we're doing a readdir on.
48529+	   If it's possible for any globbed object to match the entry we're
48530+ filling into the directory, then the object we find here will be
48531+ an anchor point with attached globbed objects
48532+ */
48533+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48534+ if (obj->globbed == NULL)
48535+ return (obj->mode & GR_FIND) ? 1 : 0;
48536+
48537+ is_not_root = ((obj->filename[0] == '/') &&
48538+ (obj->filename[1] == '\0')) ? 0 : 1;
48539+ bufsize = PAGE_SIZE - namelen - is_not_root;
48540+
48541+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
48542+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48543+ return 1;
48544+
48545+ preempt_disable();
48546+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48547+ bufsize);
48548+
48549+ bufsize = strlen(path);
48550+
48551+ /* if base is "/", don't append an additional slash */
48552+ if (is_not_root)
48553+ *(path + bufsize) = '/';
48554+ memcpy(path + bufsize + is_not_root, name, namelen);
48555+ *(path + bufsize + namelen + is_not_root) = '\0';
48556+
48557+ tmp = obj->globbed;
48558+ while (tmp) {
48559+ if (!glob_match(tmp->filename, path)) {
48560+ preempt_enable();
48561+ return (tmp->mode & GR_FIND) ? 1 : 0;
48562+ }
48563+ tmp = tmp->next;
48564+ }
48565+ preempt_enable();
48566+ return (obj->mode & GR_FIND) ? 1 : 0;
48567+}
48568+
48569+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48570+EXPORT_SYMBOL(gr_acl_is_enabled);
48571+#endif
48572+EXPORT_SYMBOL(gr_learn_resource);
48573+EXPORT_SYMBOL(gr_set_kernel_label);
48574+#ifdef CONFIG_SECURITY
48575+EXPORT_SYMBOL(gr_check_user_change);
48576+EXPORT_SYMBOL(gr_check_group_change);
48577+#endif
48578+
48579diff -urNp linux-2.6.32.42/grsecurity/gracl_cap.c linux-2.6.32.42/grsecurity/gracl_cap.c
48580--- linux-2.6.32.42/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48581+++ linux-2.6.32.42/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
48582@@ -0,0 +1,138 @@
48583+#include <linux/kernel.h>
48584+#include <linux/module.h>
48585+#include <linux/sched.h>
48586+#include <linux/gracl.h>
48587+#include <linux/grsecurity.h>
48588+#include <linux/grinternal.h>
48589+
48590+static const char *captab_log[] = {
48591+ "CAP_CHOWN",
48592+ "CAP_DAC_OVERRIDE",
48593+ "CAP_DAC_READ_SEARCH",
48594+ "CAP_FOWNER",
48595+ "CAP_FSETID",
48596+ "CAP_KILL",
48597+ "CAP_SETGID",
48598+ "CAP_SETUID",
48599+ "CAP_SETPCAP",
48600+ "CAP_LINUX_IMMUTABLE",
48601+ "CAP_NET_BIND_SERVICE",
48602+ "CAP_NET_BROADCAST",
48603+ "CAP_NET_ADMIN",
48604+ "CAP_NET_RAW",
48605+ "CAP_IPC_LOCK",
48606+ "CAP_IPC_OWNER",
48607+ "CAP_SYS_MODULE",
48608+ "CAP_SYS_RAWIO",
48609+ "CAP_SYS_CHROOT",
48610+ "CAP_SYS_PTRACE",
48611+ "CAP_SYS_PACCT",
48612+ "CAP_SYS_ADMIN",
48613+ "CAP_SYS_BOOT",
48614+ "CAP_SYS_NICE",
48615+ "CAP_SYS_RESOURCE",
48616+ "CAP_SYS_TIME",
48617+ "CAP_SYS_TTY_CONFIG",
48618+ "CAP_MKNOD",
48619+ "CAP_LEASE",
48620+ "CAP_AUDIT_WRITE",
48621+ "CAP_AUDIT_CONTROL",
48622+ "CAP_SETFCAP",
48623+ "CAP_MAC_OVERRIDE",
48624+ "CAP_MAC_ADMIN"
48625+};
48626+
48627+EXPORT_SYMBOL(gr_is_capable);
48628+EXPORT_SYMBOL(gr_is_capable_nolog);
48629+
48630+int
48631+gr_is_capable(const int cap)
48632+{
48633+ struct task_struct *task = current;
48634+ const struct cred *cred = current_cred();
48635+ struct acl_subject_label *curracl;
48636+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48637+ kernel_cap_t cap_audit = __cap_empty_set;
48638+
48639+ if (!gr_acl_is_enabled())
48640+ return 1;
48641+
48642+ curracl = task->acl;
48643+
48644+ cap_drop = curracl->cap_lower;
48645+ cap_mask = curracl->cap_mask;
48646+ cap_audit = curracl->cap_invert_audit;
48647+
48648+ while ((curracl = curracl->parent_subject)) {
48649+ /* if the cap isn't specified in the current computed mask but is specified in the
48650+ current level subject, and is lowered in the current level subject, then add
48651+		   it to the set of dropped capabilities;
48652+ otherwise, add the current level subject's mask to the current computed mask
48653+ */
48654+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48655+ cap_raise(cap_mask, cap);
48656+ if (cap_raised(curracl->cap_lower, cap))
48657+ cap_raise(cap_drop, cap);
48658+ if (cap_raised(curracl->cap_invert_audit, cap))
48659+ cap_raise(cap_audit, cap);
48660+ }
48661+ }
48662+
48663+ if (!cap_raised(cap_drop, cap)) {
48664+ if (cap_raised(cap_audit, cap))
48665+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48666+ return 1;
48667+ }
48668+
48669+ curracl = task->acl;
48670+
48671+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48672+ && cap_raised(cred->cap_effective, cap)) {
48673+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48674+ task->role->roletype, cred->uid,
48675+ cred->gid, task->exec_file ?
48676+ gr_to_filename(task->exec_file->f_path.dentry,
48677+ task->exec_file->f_path.mnt) : curracl->filename,
48678+ curracl->filename, 0UL,
48679+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48680+ return 1;
48681+ }
48682+
48683+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48684+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48685+ return 0;
48686+}
48687+
48688+int
48689+gr_is_capable_nolog(const int cap)
48690+{
48691+ struct acl_subject_label *curracl;
48692+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48693+
48694+ if (!gr_acl_is_enabled())
48695+ return 1;
48696+
48697+ curracl = current->acl;
48698+
48699+ cap_drop = curracl->cap_lower;
48700+ cap_mask = curracl->cap_mask;
48701+
48702+ while ((curracl = curracl->parent_subject)) {
48703+ /* if the cap isn't specified in the current computed mask but is specified in the
48704+ current level subject, and is lowered in the current level subject, then add
48705+		   it to the set of dropped capabilities;
48706+ otherwise, add the current level subject's mask to the current computed mask
48707+ */
48708+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48709+ cap_raise(cap_mask, cap);
48710+ if (cap_raised(curracl->cap_lower, cap))
48711+ cap_raise(cap_drop, cap);
48712+ }
48713+ }
48714+
48715+ if (!cap_raised(cap_drop, cap))
48716+ return 1;
48717+
48718+ return 0;
48719+}
48720+
48721diff -urNp linux-2.6.32.42/grsecurity/gracl_fs.c linux-2.6.32.42/grsecurity/gracl_fs.c
48722--- linux-2.6.32.42/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48723+++ linux-2.6.32.42/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48724@@ -0,0 +1,431 @@
48725+#include <linux/kernel.h>
48726+#include <linux/sched.h>
48727+#include <linux/types.h>
48728+#include <linux/fs.h>
48729+#include <linux/file.h>
48730+#include <linux/stat.h>
48731+#include <linux/grsecurity.h>
48732+#include <linux/grinternal.h>
48733+#include <linux/gracl.h>
48734+
48735+__u32
48736+gr_acl_handle_hidden_file(const struct dentry * dentry,
48737+ const struct vfsmount * mnt)
48738+{
48739+ __u32 mode;
48740+
48741+ if (unlikely(!dentry->d_inode))
48742+ return GR_FIND;
48743+
48744+ mode =
48745+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48746+
48747+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48748+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48749+ return mode;
48750+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48751+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48752+ return 0;
48753+ } else if (unlikely(!(mode & GR_FIND)))
48754+ return 0;
48755+
48756+ return GR_FIND;
48757+}
48758+
48759+__u32
48760+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48761+ const int fmode)
48762+{
48763+ __u32 reqmode = GR_FIND;
48764+ __u32 mode;
48765+
48766+ if (unlikely(!dentry->d_inode))
48767+ return reqmode;
48768+
48769+ if (unlikely(fmode & O_APPEND))
48770+ reqmode |= GR_APPEND;
48771+ else if (unlikely(fmode & FMODE_WRITE))
48772+ reqmode |= GR_WRITE;
48773+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48774+ reqmode |= GR_READ;
48775+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48776+ reqmode &= ~GR_READ;
48777+ mode =
48778+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48779+ mnt);
48780+
48781+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48782+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48783+ reqmode & GR_READ ? " reading" : "",
48784+ reqmode & GR_WRITE ? " writing" : reqmode &
48785+ GR_APPEND ? " appending" : "");
48786+ return reqmode;
48787+ } else
48788+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48789+ {
48790+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48791+ reqmode & GR_READ ? " reading" : "",
48792+ reqmode & GR_WRITE ? " writing" : reqmode &
48793+ GR_APPEND ? " appending" : "");
48794+ return 0;
48795+ } else if (unlikely((mode & reqmode) != reqmode))
48796+ return 0;
48797+
48798+ return reqmode;
48799+}
48800+
48801+__u32
48802+gr_acl_handle_creat(const struct dentry * dentry,
48803+ const struct dentry * p_dentry,
48804+ const struct vfsmount * p_mnt, const int fmode,
48805+ const int imode)
48806+{
48807+ __u32 reqmode = GR_WRITE | GR_CREATE;
48808+ __u32 mode;
48809+
48810+ if (unlikely(fmode & O_APPEND))
48811+ reqmode |= GR_APPEND;
48812+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48813+ reqmode |= GR_READ;
48814+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48815+ reqmode |= GR_SETID;
48816+
48817+ mode =
48818+ gr_check_create(dentry, p_dentry, p_mnt,
48819+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48820+
48821+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48822+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48823+ reqmode & GR_READ ? " reading" : "",
48824+ reqmode & GR_WRITE ? " writing" : reqmode &
48825+ GR_APPEND ? " appending" : "");
48826+ return reqmode;
48827+ } else
48828+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48829+ {
48830+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48831+ reqmode & GR_READ ? " reading" : "",
48832+ reqmode & GR_WRITE ? " writing" : reqmode &
48833+ GR_APPEND ? " appending" : "");
48834+ return 0;
48835+ } else if (unlikely((mode & reqmode) != reqmode))
48836+ return 0;
48837+
48838+ return reqmode;
48839+}
48840+
48841+__u32
48842+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48843+ const int fmode)
48844+{
48845+ __u32 mode, reqmode = GR_FIND;
48846+
48847+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48848+ reqmode |= GR_EXEC;
48849+ if (fmode & S_IWOTH)
48850+ reqmode |= GR_WRITE;
48851+ if (fmode & S_IROTH)
48852+ reqmode |= GR_READ;
48853+
48854+ mode =
48855+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48856+ mnt);
48857+
48858+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48859+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48860+ reqmode & GR_READ ? " reading" : "",
48861+ reqmode & GR_WRITE ? " writing" : "",
48862+ reqmode & GR_EXEC ? " executing" : "");
48863+ return reqmode;
48864+ } else
48865+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48866+ {
48867+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48868+ reqmode & GR_READ ? " reading" : "",
48869+ reqmode & GR_WRITE ? " writing" : "",
48870+ reqmode & GR_EXEC ? " executing" : "");
48871+ return 0;
48872+ } else if (unlikely((mode & reqmode) != reqmode))
48873+ return 0;
48874+
48875+ return reqmode;
48876+}
48877+
48878+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48879+{
48880+ __u32 mode;
48881+
48882+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48883+
48884+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48885+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48886+ return mode;
48887+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48888+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48889+ return 0;
48890+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48891+ return 0;
48892+
48893+ return (reqmode);
48894+}
48895+
48896+__u32
48897+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48898+{
48899+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48900+}
48901+
48902+__u32
48903+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48904+{
48905+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48906+}
48907+
48908+__u32
48909+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48910+{
48911+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48912+}
48913+
48914+__u32
48915+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48916+{
48917+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48918+}
48919+
48920+__u32
48921+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48922+ mode_t mode)
48923+{
48924+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48925+ return 1;
48926+
48927+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48928+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48929+ GR_FCHMOD_ACL_MSG);
48930+ } else {
48931+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48932+ }
48933+}
48934+
48935+__u32
48936+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48937+ mode_t mode)
48938+{
48939+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48940+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48941+ GR_CHMOD_ACL_MSG);
48942+ } else {
48943+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48944+ }
48945+}
48946+
48947+__u32
48948+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48949+{
48950+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48951+}
48952+
48953+__u32
48954+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48955+{
48956+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48957+}
48958+
48959+__u32
48960+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48961+{
48962+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48963+}
48964+
48965+__u32
48966+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48967+{
48968+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48969+ GR_UNIXCONNECT_ACL_MSG);
48970+}
48971+
48972+/* hardlinks require at minimum create permission;
48973+ any additional privilege required is based on the
48974+ privilege of the file being linked to
48975+*/
48976+__u32
48977+gr_acl_handle_link(const struct dentry * new_dentry,
48978+ const struct dentry * parent_dentry,
48979+ const struct vfsmount * parent_mnt,
48980+ const struct dentry * old_dentry,
48981+ const struct vfsmount * old_mnt, const char *to)
48982+{
48983+ __u32 mode;
48984+ __u32 needmode = GR_CREATE | GR_LINK;
48985+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48986+
48987+ mode =
48988+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48989+ old_mnt);
48990+
48991+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48992+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48993+ return mode;
48994+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48995+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48996+ return 0;
48997+ } else if (unlikely((mode & needmode) != needmode))
48998+ return 0;
48999+
49000+ return 1;
49001+}
49002+
49003+__u32
49004+gr_acl_handle_symlink(const struct dentry * new_dentry,
49005+ const struct dentry * parent_dentry,
49006+ const struct vfsmount * parent_mnt, const char *from)
49007+{
49008+ __u32 needmode = GR_WRITE | GR_CREATE;
49009+ __u32 mode;
49010+
49011+ mode =
49012+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49013+ GR_CREATE | GR_AUDIT_CREATE |
49014+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49015+
49016+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49017+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49018+ return mode;
49019+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49020+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49021+ return 0;
49022+ } else if (unlikely((mode & needmode) != needmode))
49023+ return 0;
49024+
49025+ return (GR_WRITE | GR_CREATE);
49026+}
49027+
49028+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49029+{
49030+ __u32 mode;
49031+
49032+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49033+
49034+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49035+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49036+ return mode;
49037+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49038+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49039+ return 0;
49040+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49041+ return 0;
49042+
49043+ return (reqmode);
49044+}
49045+
49046+__u32
49047+gr_acl_handle_mknod(const struct dentry * new_dentry,
49048+ const struct dentry * parent_dentry,
49049+ const struct vfsmount * parent_mnt,
49050+ const int mode)
49051+{
49052+ __u32 reqmode = GR_WRITE | GR_CREATE;
49053+ if (unlikely(mode & (S_ISUID | S_ISGID)))
49054+ reqmode |= GR_SETID;
49055+
49056+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49057+ reqmode, GR_MKNOD_ACL_MSG);
49058+}
49059+
49060+__u32
49061+gr_acl_handle_mkdir(const struct dentry *new_dentry,
49062+ const struct dentry *parent_dentry,
49063+ const struct vfsmount *parent_mnt)
49064+{
49065+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49066+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49067+}
49068+
49069+#define RENAME_CHECK_SUCCESS(old, new) \
49070+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49071+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49072+
49073+int
49074+gr_acl_handle_rename(struct dentry *new_dentry,
49075+ struct dentry *parent_dentry,
49076+ const struct vfsmount *parent_mnt,
49077+ struct dentry *old_dentry,
49078+ struct inode *old_parent_inode,
49079+ struct vfsmount *old_mnt, const char *newname)
49080+{
49081+ __u32 comp1, comp2;
49082+ int error = 0;
49083+
49084+ if (unlikely(!gr_acl_is_enabled()))
49085+ return 0;
49086+
49087+ if (!new_dentry->d_inode) {
49088+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49089+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49090+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49091+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49092+ GR_DELETE | GR_AUDIT_DELETE |
49093+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49094+ GR_SUPPRESS, old_mnt);
49095+ } else {
49096+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49097+ GR_CREATE | GR_DELETE |
49098+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49099+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49100+ GR_SUPPRESS, parent_mnt);
49101+ comp2 =
49102+ gr_search_file(old_dentry,
49103+ GR_READ | GR_WRITE | GR_AUDIT_READ |
49104+ GR_DELETE | GR_AUDIT_DELETE |
49105+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
49106+ }
49107+
49108+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
49109+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
49110+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49111+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
49112+ && !(comp2 & GR_SUPPRESS)) {
49113+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49114+ error = -EACCES;
49115+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
49116+ error = -EACCES;
49117+
49118+ return error;
49119+}
49120+
49121+void
49122+gr_acl_handle_exit(void)
49123+{
49124+ u16 id;
49125+ char *rolename;
49126+ struct file *exec_file;
49127+
49128+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
49129+ !(current->role->roletype & GR_ROLE_PERSIST))) {
49130+ id = current->acl_role_id;
49131+ rolename = current->role->rolename;
49132+ gr_set_acls(1);
49133+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
49134+ }
49135+
49136+ write_lock(&grsec_exec_file_lock);
49137+ exec_file = current->exec_file;
49138+ current->exec_file = NULL;
49139+ write_unlock(&grsec_exec_file_lock);
49140+
49141+ if (exec_file)
49142+ fput(exec_file);
49143+}
49144+
49145+int
49146+gr_acl_handle_procpidmem(const struct task_struct *task)
49147+{
49148+ if (unlikely(!gr_acl_is_enabled()))
49149+ return 0;
49150+
49151+ if (task != current && task->acl->mode & GR_PROTPROCFD)
49152+ return -EACCES;
49153+
49154+ return 0;
49155+}
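
The filesystem hooks above all funnel through generic_fs_handler(): if the subject's computed mode covers every requested bit and carries an audit bit, the access is allowed and audited; if bits are missing and GR_SUPPRESS is not set, it is denied and logged; otherwise it is denied silently. The following is a minimal userspace sketch of that three-way decision, not taken from the patch; the REQ_*/AUDIT_BITS/SUPPRESS values are made up for illustration and do not correspond to the real GR_* flags.

#include <stdio.h>

/* Made-up stand-ins for the real GR_* flags; values are illustrative only. */
#define REQ_READ    0x01
#define REQ_WRITE   0x02
#define AUDIT_BITS  0x10   /* "log this allowed access" */
#define SUPPRESS    0x20   /* "do not log this denial" */

/* Mirrors the allow/deny/suppress branching of generic_fs_handler(). */
static unsigned int decide(unsigned int mode, unsigned int reqmode)
{
	if (((mode & reqmode) == reqmode) && (mode & AUDIT_BITS)) {
		printf("allowed + audited\n");
		return mode;
	} else if (((mode & reqmode) != reqmode) && !(mode & SUPPRESS)) {
		printf("denied + logged\n");
		return 0;
	} else if ((mode & reqmode) != reqmode) {
		printf("denied silently (suppressed)\n");
		return 0;
	}
	printf("allowed, no audit\n");
	return reqmode;
}

int main(void)
{
	decide(REQ_READ | REQ_WRITE | AUDIT_BITS, REQ_WRITE); /* allowed + audited */
	decide(REQ_READ, REQ_WRITE);                          /* denied + logged   */
	decide(REQ_READ | SUPPRESS, REQ_WRITE);               /* denied silently   */
	decide(REQ_READ | REQ_WRITE, REQ_WRITE);              /* allowed, no audit */
	return 0;
}
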
49156diff -urNp linux-2.6.32.42/grsecurity/gracl_ip.c linux-2.6.32.42/grsecurity/gracl_ip.c
49157--- linux-2.6.32.42/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
49158+++ linux-2.6.32.42/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
49159@@ -0,0 +1,382 @@
49160+#include <linux/kernel.h>
49161+#include <asm/uaccess.h>
49162+#include <asm/errno.h>
49163+#include <net/sock.h>
49164+#include <linux/file.h>
49165+#include <linux/fs.h>
49166+#include <linux/net.h>
49167+#include <linux/in.h>
49168+#include <linux/skbuff.h>
49169+#include <linux/ip.h>
49170+#include <linux/udp.h>
49171+#include <linux/smp_lock.h>
49172+#include <linux/types.h>
49173+#include <linux/sched.h>
49174+#include <linux/netdevice.h>
49175+#include <linux/inetdevice.h>
49176+#include <linux/gracl.h>
49177+#include <linux/grsecurity.h>
49178+#include <linux/grinternal.h>
49179+
49180+#define GR_BIND 0x01
49181+#define GR_CONNECT 0x02
49182+#define GR_INVERT 0x04
49183+#define GR_BINDOVERRIDE 0x08
49184+#define GR_CONNECTOVERRIDE 0x10
49185+#define GR_SOCK_FAMILY 0x20
49186+
49187+static const char * gr_protocols[IPPROTO_MAX] = {
49188+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49189+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49190+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49191+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49192+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49193+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49194+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49195+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49196+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49197+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49198+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49199+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49200+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49201+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49202+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49203+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49204+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
49205+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49206+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49207+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49208+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49209+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49210+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49211+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49212+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49213+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49214+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49215+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49216+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49217+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49218+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49219+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49220+ };
49221+
49222+static const char * gr_socktypes[SOCK_MAX] = {
49223+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49224+ "unknown:7", "unknown:8", "unknown:9", "packet"
49225+ };
49226+
49227+static const char * gr_sockfamilies[AF_MAX+1] = {
49228+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49229+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49230+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
49231+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
49232+ };
49233+
49234+const char *
49235+gr_proto_to_name(unsigned char proto)
49236+{
49237+ return gr_protocols[proto];
49238+}
49239+
49240+const char *
49241+gr_socktype_to_name(unsigned char type)
49242+{
49243+ return gr_socktypes[type];
49244+}
49245+
49246+const char *
49247+gr_sockfamily_to_name(unsigned char family)
49248+{
49249+ return gr_sockfamilies[family];
49250+}
49251+
49252+int
49253+gr_search_socket(const int domain, const int type, const int protocol)
49254+{
49255+ struct acl_subject_label *curr;
49256+ const struct cred *cred = current_cred();
49257+
49258+ if (unlikely(!gr_acl_is_enabled()))
49259+ goto exit;
49260+
49261+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
49262+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49263+ goto exit; // let the kernel handle it
49264+
49265+ curr = current->acl;
49266+
49267+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49268+		/* the family is allowed; if this is PF_INET, allow it only if
49269+		   the extra sock type/protocol checks pass */
49270+ if (domain == PF_INET)
49271+ goto inet_check;
49272+ goto exit;
49273+ } else {
49274+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49275+ __u32 fakeip = 0;
49276+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49277+ current->role->roletype, cred->uid,
49278+ cred->gid, current->exec_file ?
49279+ gr_to_filename(current->exec_file->f_path.dentry,
49280+ current->exec_file->f_path.mnt) :
49281+ curr->filename, curr->filename,
49282+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49283+ &current->signal->saved_ip);
49284+ goto exit;
49285+ }
49286+ goto exit_fail;
49287+ }
49288+
49289+inet_check:
49290+ /* the rest of this checking is for IPv4 only */
49291+ if (!curr->ips)
49292+ goto exit;
49293+
49294+ if ((curr->ip_type & (1 << type)) &&
49295+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49296+ goto exit;
49297+
49298+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49299+		/* we don't place acls on raw sockets, and sometimes
49300+		   dgram/ip sockets are opened for ioctl and not
49301+		   bind/connect, so we'll fake a bind learn log */
49302+ if (type == SOCK_RAW || type == SOCK_PACKET) {
49303+ __u32 fakeip = 0;
49304+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49305+ current->role->roletype, cred->uid,
49306+ cred->gid, current->exec_file ?
49307+ gr_to_filename(current->exec_file->f_path.dentry,
49308+ current->exec_file->f_path.mnt) :
49309+ curr->filename, curr->filename,
49310+ &fakeip, 0, type,
49311+ protocol, GR_CONNECT, &current->signal->saved_ip);
49312+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49313+ __u32 fakeip = 0;
49314+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49315+ current->role->roletype, cred->uid,
49316+ cred->gid, current->exec_file ?
49317+ gr_to_filename(current->exec_file->f_path.dentry,
49318+ current->exec_file->f_path.mnt) :
49319+ curr->filename, curr->filename,
49320+ &fakeip, 0, type,
49321+ protocol, GR_BIND, &current->signal->saved_ip);
49322+ }
49323+ /* we'll log when they use connect or bind */
49324+ goto exit;
49325+ }
49326+
49327+exit_fail:
49328+ if (domain == PF_INET)
49329+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49330+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
49331+ else
49332+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49333+ gr_socktype_to_name(type), protocol);
49334+
49335+ return 0;
49336+exit:
49337+ return 1;
49338+}
49339+
49340+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49341+{
49342+ if ((ip->mode & mode) &&
49343+ (ip_port >= ip->low) &&
49344+ (ip_port <= ip->high) &&
49345+ ((ntohl(ip_addr) & our_netmask) ==
49346+ (ntohl(our_addr) & our_netmask))
49347+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49348+ && (ip->type & (1 << type))) {
49349+ if (ip->mode & GR_INVERT)
49350+ return 2; // specifically denied
49351+ else
49352+ return 1; // allowed
49353+ }
49354+
49355+ return 0; // not specifically allowed, may continue parsing
49356+}
49357+
49358+static int
49359+gr_search_connectbind(const int full_mode, struct sock *sk,
49360+ struct sockaddr_in *addr, const int type)
49361+{
49362+ char iface[IFNAMSIZ] = {0};
49363+ struct acl_subject_label *curr;
49364+ struct acl_ip_label *ip;
49365+ struct inet_sock *isk;
49366+ struct net_device *dev;
49367+ struct in_device *idev;
49368+ unsigned long i;
49369+ int ret;
49370+ int mode = full_mode & (GR_BIND | GR_CONNECT);
49371+ __u32 ip_addr = 0;
49372+ __u32 our_addr;
49373+ __u32 our_netmask;
49374+ char *p;
49375+ __u16 ip_port = 0;
49376+ const struct cred *cred = current_cred();
49377+
49378+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49379+ return 0;
49380+
49381+ curr = current->acl;
49382+ isk = inet_sk(sk);
49383+
49384+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49385+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49386+ addr->sin_addr.s_addr = curr->inaddr_any_override;
49387+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49388+ struct sockaddr_in saddr;
49389+ int err;
49390+
49391+ saddr.sin_family = AF_INET;
49392+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
49393+ saddr.sin_port = isk->sport;
49394+
49395+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49396+ if (err)
49397+ return err;
49398+
49399+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49400+ if (err)
49401+ return err;
49402+ }
49403+
49404+ if (!curr->ips)
49405+ return 0;
49406+
49407+ ip_addr = addr->sin_addr.s_addr;
49408+ ip_port = ntohs(addr->sin_port);
49409+
49410+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49411+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49412+ current->role->roletype, cred->uid,
49413+ cred->gid, current->exec_file ?
49414+ gr_to_filename(current->exec_file->f_path.dentry,
49415+ current->exec_file->f_path.mnt) :
49416+ curr->filename, curr->filename,
49417+ &ip_addr, ip_port, type,
49418+ sk->sk_protocol, mode, &current->signal->saved_ip);
49419+ return 0;
49420+ }
49421+
49422+ for (i = 0; i < curr->ip_num; i++) {
49423+ ip = *(curr->ips + i);
49424+ if (ip->iface != NULL) {
49425+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
49426+ p = strchr(iface, ':');
49427+ if (p != NULL)
49428+ *p = '\0';
49429+ dev = dev_get_by_name(sock_net(sk), iface);
49430+ if (dev == NULL)
49431+ continue;
49432+ idev = in_dev_get(dev);
49433+ if (idev == NULL) {
49434+ dev_put(dev);
49435+ continue;
49436+ }
49437+ rcu_read_lock();
49438+ for_ifa(idev) {
49439+ if (!strcmp(ip->iface, ifa->ifa_label)) {
49440+ our_addr = ifa->ifa_address;
49441+ our_netmask = 0xffffffff;
49442+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49443+ if (ret == 1) {
49444+ rcu_read_unlock();
49445+ in_dev_put(idev);
49446+ dev_put(dev);
49447+ return 0;
49448+ } else if (ret == 2) {
49449+ rcu_read_unlock();
49450+ in_dev_put(idev);
49451+ dev_put(dev);
49452+ goto denied;
49453+ }
49454+ }
49455+ } endfor_ifa(idev);
49456+ rcu_read_unlock();
49457+ in_dev_put(idev);
49458+ dev_put(dev);
49459+ } else {
49460+ our_addr = ip->addr;
49461+ our_netmask = ip->netmask;
49462+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49463+ if (ret == 1)
49464+ return 0;
49465+ else if (ret == 2)
49466+ goto denied;
49467+ }
49468+ }
49469+
49470+denied:
49471+ if (mode == GR_BIND)
49472+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49473+ else if (mode == GR_CONNECT)
49474+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49475+
49476+ return -EACCES;
49477+}
49478+
49479+int
49480+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49481+{
49482+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49483+}
49484+
49485+int
49486+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49487+{
49488+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49489+}
49490+
49491+int gr_search_listen(struct socket *sock)
49492+{
49493+ struct sock *sk = sock->sk;
49494+ struct sockaddr_in addr;
49495+
49496+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49497+ addr.sin_port = inet_sk(sk)->sport;
49498+
49499+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49500+}
49501+
49502+int gr_search_accept(struct socket *sock)
49503+{
49504+ struct sock *sk = sock->sk;
49505+ struct sockaddr_in addr;
49506+
49507+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49508+ addr.sin_port = inet_sk(sk)->sport;
49509+
49510+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49511+}
49512+
49513+int
49514+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49515+{
49516+ if (addr)
49517+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49518+ else {
49519+ struct sockaddr_in sin;
49520+ const struct inet_sock *inet = inet_sk(sk);
49521+
49522+ sin.sin_addr.s_addr = inet->daddr;
49523+ sin.sin_port = inet->dport;
49524+
49525+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49526+ }
49527+}
49528+
49529+int
49530+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49531+{
49532+ struct sockaddr_in sin;
49533+
49534+ if (unlikely(skb->len < sizeof (struct udphdr)))
49535+ return 0; // skip this packet
49536+
49537+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49538+ sin.sin_port = udp_hdr(skb)->source;
49539+
49540+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49541+}
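
check_ip_policy() above matches one ACL entry against a socket endpoint: the requested mode bit, a port inside [low, high], an address inside the rule's subnet, and the protocol/type bitmaps all have to hit, with GR_INVERT turning a hit into an explicit deny. Below is a small standalone sketch of the address/port part of that match; struct ip_rule and its field names are simplified stand-ins for struct acl_ip_label, and the example works entirely in host byte order.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct acl_ip_label (field names are illustrative). */
struct ip_rule {
	uint32_t addr;      /* rule network address, host byte order */
	uint32_t netmask;   /* rule netmask, host byte order */
	uint16_t low, high; /* allowed port range */
	int invert;         /* like GR_INVERT: a match means "explicitly denied" */
};

/* 0 = no match (keep scanning rules), 1 = allowed, 2 = explicitly denied */
static int match_rule(const struct ip_rule *r, uint32_t addr, uint16_t port)
{
	if (port >= r->low && port <= r->high &&
	    (addr & r->netmask) == (r->addr & r->netmask))
		return r->invert ? 2 : 1;
	return 0;
}

int main(void)
{
	/* allow 192.168.1.0/24, ports 1024-65535 */
	struct ip_rule r = { 0xC0A80100u, 0xFFFFFF00u, 1024, 65535, 0 };

	printf("%d\n", match_rule(&r, 0xC0A80142u, 8080)); /* 192.168.1.66:8080 -> 1 */
	printf("%d\n", match_rule(&r, 0xC0A80242u, 8080)); /* 192.168.2.66:8080 -> 0 */
	r.invert = 1;
	printf("%d\n", match_rule(&r, 0xC0A80142u, 8080)); /* inverted match    -> 2 */
	return 0;
}
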
49542diff -urNp linux-2.6.32.42/grsecurity/gracl_learn.c linux-2.6.32.42/grsecurity/gracl_learn.c
49543--- linux-2.6.32.42/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49544+++ linux-2.6.32.42/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
49545@@ -0,0 +1,211 @@
49546+#include <linux/kernel.h>
49547+#include <linux/mm.h>
49548+#include <linux/sched.h>
49549+#include <linux/poll.h>
49550+#include <linux/smp_lock.h>
49551+#include <linux/string.h>
49552+#include <linux/file.h>
49553+#include <linux/types.h>
49554+#include <linux/vmalloc.h>
49555+#include <linux/grinternal.h>
49556+
49557+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49558+ size_t count, loff_t *ppos);
49559+extern int gr_acl_is_enabled(void);
49560+
49561+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49562+static int gr_learn_attached;
49563+
49564+/* use a 512k buffer */
49565+#define LEARN_BUFFER_SIZE (512 * 1024)
49566+
49567+static DEFINE_SPINLOCK(gr_learn_lock);
49568+static DEFINE_MUTEX(gr_learn_user_mutex);
49569+
49570+/* we need to maintain two buffers: the grlearn reader context takes a mutex
49571+   around the copy to userspace, while the other kernel contexts take a
49572+   spinlock when copying into the buffer, since they cannot sleep
49573+*/
49574+static char *learn_buffer;
49575+static char *learn_buffer_user;
49576+static int learn_buffer_len;
49577+static int learn_buffer_user_len;
49578+
49579+static ssize_t
49580+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49581+{
49582+ DECLARE_WAITQUEUE(wait, current);
49583+ ssize_t retval = 0;
49584+
49585+ add_wait_queue(&learn_wait, &wait);
49586+ set_current_state(TASK_INTERRUPTIBLE);
49587+ do {
49588+ mutex_lock(&gr_learn_user_mutex);
49589+ spin_lock(&gr_learn_lock);
49590+ if (learn_buffer_len)
49591+ break;
49592+ spin_unlock(&gr_learn_lock);
49593+ mutex_unlock(&gr_learn_user_mutex);
49594+ if (file->f_flags & O_NONBLOCK) {
49595+ retval = -EAGAIN;
49596+ goto out;
49597+ }
49598+ if (signal_pending(current)) {
49599+ retval = -ERESTARTSYS;
49600+ goto out;
49601+ }
49602+
49603+ schedule();
49604+ } while (1);
49605+
49606+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49607+ learn_buffer_user_len = learn_buffer_len;
49608+ retval = learn_buffer_len;
49609+ learn_buffer_len = 0;
49610+
49611+ spin_unlock(&gr_learn_lock);
49612+
49613+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49614+ retval = -EFAULT;
49615+
49616+ mutex_unlock(&gr_learn_user_mutex);
49617+out:
49618+ set_current_state(TASK_RUNNING);
49619+ remove_wait_queue(&learn_wait, &wait);
49620+ return retval;
49621+}
49622+
49623+static unsigned int
49624+poll_learn(struct file * file, poll_table * wait)
49625+{
49626+ poll_wait(file, &learn_wait, wait);
49627+
49628+ if (learn_buffer_len)
49629+ return (POLLIN | POLLRDNORM);
49630+
49631+ return 0;
49632+}
49633+
49634+void
49635+gr_clear_learn_entries(void)
49636+{
49637+ char *tmp;
49638+
49639+ mutex_lock(&gr_learn_user_mutex);
49640+ if (learn_buffer != NULL) {
49641+ spin_lock(&gr_learn_lock);
49642+ tmp = learn_buffer;
49643+ learn_buffer = NULL;
49644+ spin_unlock(&gr_learn_lock);
49645+		vfree(tmp);
49646+ }
49647+ if (learn_buffer_user != NULL) {
49648+ vfree(learn_buffer_user);
49649+ learn_buffer_user = NULL;
49650+ }
49651+ learn_buffer_len = 0;
49652+ mutex_unlock(&gr_learn_user_mutex);
49653+
49654+ return;
49655+}
49656+
49657+void
49658+gr_add_learn_entry(const char *fmt, ...)
49659+{
49660+ va_list args;
49661+ unsigned int len;
49662+
49663+ if (!gr_learn_attached)
49664+ return;
49665+
49666+ spin_lock(&gr_learn_lock);
49667+
49668+ /* leave a gap at the end so we know when it's "full" but don't have to
49669+ compute the exact length of the string we're trying to append
49670+ */
49671+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49672+ spin_unlock(&gr_learn_lock);
49673+ wake_up_interruptible(&learn_wait);
49674+ return;
49675+ }
49676+ if (learn_buffer == NULL) {
49677+ spin_unlock(&gr_learn_lock);
49678+ return;
49679+ }
49680+
49681+ va_start(args, fmt);
49682+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49683+ va_end(args);
49684+
49685+ learn_buffer_len += len + 1;
49686+
49687+ spin_unlock(&gr_learn_lock);
49688+ wake_up_interruptible(&learn_wait);
49689+
49690+ return;
49691+}
49692+
49693+static int
49694+open_learn(struct inode *inode, struct file *file)
49695+{
49696+ if (file->f_mode & FMODE_READ && gr_learn_attached)
49697+ return -EBUSY;
49698+ if (file->f_mode & FMODE_READ) {
49699+ int retval = 0;
49700+ mutex_lock(&gr_learn_user_mutex);
49701+ if (learn_buffer == NULL)
49702+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49703+ if (learn_buffer_user == NULL)
49704+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49705+ if (learn_buffer == NULL) {
49706+ retval = -ENOMEM;
49707+ goto out_error;
49708+ }
49709+ if (learn_buffer_user == NULL) {
49710+ retval = -ENOMEM;
49711+ goto out_error;
49712+ }
49713+ learn_buffer_len = 0;
49714+ learn_buffer_user_len = 0;
49715+ gr_learn_attached = 1;
49716+out_error:
49717+ mutex_unlock(&gr_learn_user_mutex);
49718+ return retval;
49719+ }
49720+ return 0;
49721+}
49722+
49723+static int
49724+close_learn(struct inode *inode, struct file *file)
49725+{
49726+ char *tmp;
49727+
49728+ if (file->f_mode & FMODE_READ) {
49729+ mutex_lock(&gr_learn_user_mutex);
49730+ if (learn_buffer != NULL) {
49731+ spin_lock(&gr_learn_lock);
49732+ tmp = learn_buffer;
49733+ learn_buffer = NULL;
49734+ spin_unlock(&gr_learn_lock);
49735+ vfree(tmp);
49736+ }
49737+ if (learn_buffer_user != NULL) {
49738+ vfree(learn_buffer_user);
49739+ learn_buffer_user = NULL;
49740+ }
49741+ learn_buffer_len = 0;
49742+ learn_buffer_user_len = 0;
49743+ gr_learn_attached = 0;
49744+ mutex_unlock(&gr_learn_user_mutex);
49745+ }
49746+
49747+ return 0;
49748+}
49749+
49750+const struct file_operations grsec_fops = {
49751+ .read = read_learn,
49752+ .write = write_grsec_handler,
49753+ .open = open_learn,
49754+ .release = close_learn,
49755+ .poll = poll_learn,
49756+};
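
The learning interface above keeps two buffers so that log producers, which may run in atomic context, only take gr_learn_lock (a spinlock) while appending, and the single grlearn reader snapshots the data into a second buffer under gr_learn_user_mutex before the sleepable copy_to_user(). Here is a rough userspace sketch of that hand-off, using pthread primitives in place of the kernel ones (compile with -pthread); buffer sizes and the log strings are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static char buf[BUF_SZ];       /* appended to under the spinlock only   */
static char snapshot[BUF_SZ];  /* reader-private copy, guarded by mutex */
static size_t buf_len;

static pthread_spinlock_t buf_lock;                              /* like gr_learn_lock       */
static pthread_mutex_t reader_mutex = PTHREAD_MUTEX_INITIALIZER; /* like gr_learn_user_mutex */

/* Writer side: cheap, non-sleeping append (mirrors gr_add_learn_entry). */
static void add_entry(const char *s)
{
	pthread_spin_lock(&buf_lock);
	size_t n = strlen(s);
	if (buf_len + n + 1 < BUF_SZ) {
		memcpy(buf + buf_len, s, n);
		buf_len += n;
		buf[buf_len++] = '\n';
	}
	pthread_spin_unlock(&buf_lock);
}

/* Reader side: snapshot under the spinlock, then do the slow work
   (copy_to_user in the kernel, fwrite here) holding only the mutex. */
static void drain(void)
{
	pthread_mutex_lock(&reader_mutex);
	pthread_spin_lock(&buf_lock);
	size_t n = buf_len;
	memcpy(snapshot, buf, n);
	buf_len = 0;
	pthread_spin_unlock(&buf_lock);

	fwrite(snapshot, 1, n, stdout);   /* the "sleepable" part */
	pthread_mutex_unlock(&reader_mutex);
}

int main(void)
{
	pthread_spin_init(&buf_lock, PTHREAD_PROCESS_PRIVATE);
	add_entry("subject /usr/bin/foo wants read on /etc/passwd");
	add_entry("subject /usr/bin/foo wants bind on 0.0.0.0:80");
	drain();
	pthread_spin_destroy(&buf_lock);
	return 0;
}
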
49757diff -urNp linux-2.6.32.42/grsecurity/gracl_res.c linux-2.6.32.42/grsecurity/gracl_res.c
49758--- linux-2.6.32.42/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49759+++ linux-2.6.32.42/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49760@@ -0,0 +1,67 @@
49761+#include <linux/kernel.h>
49762+#include <linux/sched.h>
49763+#include <linux/gracl.h>
49764+#include <linux/grinternal.h>
49765+
49766+static const char *restab_log[] = {
49767+ [RLIMIT_CPU] = "RLIMIT_CPU",
49768+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49769+ [RLIMIT_DATA] = "RLIMIT_DATA",
49770+ [RLIMIT_STACK] = "RLIMIT_STACK",
49771+ [RLIMIT_CORE] = "RLIMIT_CORE",
49772+ [RLIMIT_RSS] = "RLIMIT_RSS",
49773+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
49774+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49775+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49776+ [RLIMIT_AS] = "RLIMIT_AS",
49777+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49778+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49779+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49780+ [RLIMIT_NICE] = "RLIMIT_NICE",
49781+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49782+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49783+ [GR_CRASH_RES] = "RLIMIT_CRASH"
49784+};
49785+
49786+void
49787+gr_log_resource(const struct task_struct *task,
49788+ const int res, const unsigned long wanted, const int gt)
49789+{
49790+ const struct cred *cred;
49791+ unsigned long rlim;
49792+
49793+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
49794+ return;
49795+
49796+ // not yet supported resource
49797+ if (unlikely(!restab_log[res]))
49798+ return;
49799+
49800+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49801+ rlim = task->signal->rlim[res].rlim_max;
49802+ else
49803+ rlim = task->signal->rlim[res].rlim_cur;
49804+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49805+ return;
49806+
49807+ rcu_read_lock();
49808+ cred = __task_cred(task);
49809+
49810+ if (res == RLIMIT_NPROC &&
49811+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49812+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49813+ goto out_rcu_unlock;
49814+ else if (res == RLIMIT_MEMLOCK &&
49815+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49816+ goto out_rcu_unlock;
49817+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49818+ goto out_rcu_unlock;
49819+ rcu_read_unlock();
49820+
49821+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49822+
49823+ return;
49824+out_rcu_unlock:
49825+ rcu_read_unlock();
49826+ return;
49827+}
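
gr_log_resource() above stays silent while the request is still within the limit: nothing is logged for RLIM_INFINITY, and the bound is inclusive or exclusive depending on the gt flag passed by the caller. Below is a tiny sketch of that predicate, inverted so that it returns 1 when a violation would be logged; the numbers are illustrative.

#include <stdio.h>
#include <limits.h>

#define RLIM_INFINITY ULONG_MAX  /* stand-in for the kernel constant */

/* Returns 1 when a violation should be logged; the kernel code returns early
   in exactly the complementary cases. */
static int over_limit(unsigned long rlim, unsigned long wanted, int gt)
{
	if (rlim == RLIM_INFINITY)
		return 0;
	return gt ? (wanted > rlim) : (wanted >= rlim);
}

int main(void)
{
	printf("%d\n", over_limit(1024, 1024, 1));        /* gt check, at the limit -> 0 */
	printf("%d\n", over_limit(1024, 1025, 1));        /* gt check, over         -> 1 */
	printf("%d\n", over_limit(1024, 1024, 0));        /* non-gt check, at limit -> 1 */
	printf("%d\n", over_limit(RLIM_INFINITY, 99, 1)); /* unlimited              -> 0 */
	return 0;
}
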
49828diff -urNp linux-2.6.32.42/grsecurity/gracl_segv.c linux-2.6.32.42/grsecurity/gracl_segv.c
49829--- linux-2.6.32.42/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49830+++ linux-2.6.32.42/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49831@@ -0,0 +1,284 @@
49832+#include <linux/kernel.h>
49833+#include <linux/mm.h>
49834+#include <asm/uaccess.h>
49835+#include <asm/errno.h>
49836+#include <asm/mman.h>
49837+#include <net/sock.h>
49838+#include <linux/file.h>
49839+#include <linux/fs.h>
49840+#include <linux/net.h>
49841+#include <linux/in.h>
49842+#include <linux/smp_lock.h>
49843+#include <linux/slab.h>
49844+#include <linux/types.h>
49845+#include <linux/sched.h>
49846+#include <linux/timer.h>
49847+#include <linux/gracl.h>
49848+#include <linux/grsecurity.h>
49849+#include <linux/grinternal.h>
49850+
49851+static struct crash_uid *uid_set;
49852+static unsigned short uid_used;
49853+static DEFINE_SPINLOCK(gr_uid_lock);
49854+extern rwlock_t gr_inode_lock;
49855+extern struct acl_subject_label *
49856+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49857+ struct acl_role_label *role);
49858+extern int gr_fake_force_sig(int sig, struct task_struct *t);
49859+
49860+int
49861+gr_init_uidset(void)
49862+{
49863+ uid_set =
49864+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49865+ uid_used = 0;
49866+
49867+ return uid_set ? 1 : 0;
49868+}
49869+
49870+void
49871+gr_free_uidset(void)
49872+{
49873+ if (uid_set)
49874+ kfree(uid_set);
49875+
49876+ return;
49877+}
49878+
49879+int
49880+gr_find_uid(const uid_t uid)
49881+{
49882+ struct crash_uid *tmp = uid_set;
49883+ uid_t buid;
49884+ int low = 0, high = uid_used - 1, mid;
49885+
49886+ while (high >= low) {
49887+ mid = (low + high) >> 1;
49888+ buid = tmp[mid].uid;
49889+ if (buid == uid)
49890+ return mid;
49891+ if (buid > uid)
49892+ high = mid - 1;
49893+ if (buid < uid)
49894+ low = mid + 1;
49895+ }
49896+
49897+ return -1;
49898+}
49899+
49900+static __inline__ void
49901+gr_insertsort(void)
49902+{
49903+ unsigned short i, j;
49904+ struct crash_uid index;
49905+
49906+ for (i = 1; i < uid_used; i++) {
49907+ index = uid_set[i];
49908+ j = i;
49909+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49910+ uid_set[j] = uid_set[j - 1];
49911+ j--;
49912+ }
49913+ uid_set[j] = index;
49914+ }
49915+
49916+ return;
49917+}
49918+
49919+static __inline__ void
49920+gr_insert_uid(const uid_t uid, const unsigned long expires)
49921+{
49922+ int loc;
49923+
49924+ if (uid_used == GR_UIDTABLE_MAX)
49925+ return;
49926+
49927+ loc = gr_find_uid(uid);
49928+
49929+ if (loc >= 0) {
49930+ uid_set[loc].expires = expires;
49931+ return;
49932+ }
49933+
49934+ uid_set[uid_used].uid = uid;
49935+ uid_set[uid_used].expires = expires;
49936+ uid_used++;
49937+
49938+ gr_insertsort();
49939+
49940+ return;
49941+}
49942+
49943+void
49944+gr_remove_uid(const unsigned short loc)
49945+{
49946+ unsigned short i;
49947+
49948+ for (i = loc + 1; i < uid_used; i++)
49949+ uid_set[i - 1] = uid_set[i];
49950+
49951+ uid_used--;
49952+
49953+ return;
49954+}
49955+
49956+int
49957+gr_check_crash_uid(const uid_t uid)
49958+{
49959+ int loc;
49960+ int ret = 0;
49961+
49962+ if (unlikely(!gr_acl_is_enabled()))
49963+ return 0;
49964+
49965+ spin_lock(&gr_uid_lock);
49966+ loc = gr_find_uid(uid);
49967+
49968+ if (loc < 0)
49969+ goto out_unlock;
49970+
49971+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
49972+ gr_remove_uid(loc);
49973+ else
49974+ ret = 1;
49975+
49976+out_unlock:
49977+ spin_unlock(&gr_uid_lock);
49978+ return ret;
49979+}
49980+
49981+static __inline__ int
49982+proc_is_setxid(const struct cred *cred)
49983+{
49984+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
49985+ cred->uid != cred->fsuid)
49986+ return 1;
49987+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49988+ cred->gid != cred->fsgid)
49989+ return 1;
49990+
49991+ return 0;
49992+}
49993+
49994+void
49995+gr_handle_crash(struct task_struct *task, const int sig)
49996+{
49997+ struct acl_subject_label *curr;
49998+ struct acl_subject_label *curr2;
49999+ struct task_struct *tsk, *tsk2;
50000+ const struct cred *cred;
50001+ const struct cred *cred2;
50002+
50003+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50004+ return;
50005+
50006+ if (unlikely(!gr_acl_is_enabled()))
50007+ return;
50008+
50009+ curr = task->acl;
50010+
50011+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
50012+ return;
50013+
50014+ if (time_before_eq(curr->expires, get_seconds())) {
50015+ curr->expires = 0;
50016+ curr->crashes = 0;
50017+ }
50018+
50019+ curr->crashes++;
50020+
50021+ if (!curr->expires)
50022+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50023+
50024+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50025+ time_after(curr->expires, get_seconds())) {
50026+ rcu_read_lock();
50027+ cred = __task_cred(task);
50028+ if (cred->uid && proc_is_setxid(cred)) {
50029+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50030+ spin_lock(&gr_uid_lock);
50031+ gr_insert_uid(cred->uid, curr->expires);
50032+ spin_unlock(&gr_uid_lock);
50033+ curr->expires = 0;
50034+ curr->crashes = 0;
50035+ read_lock(&tasklist_lock);
50036+ do_each_thread(tsk2, tsk) {
50037+ cred2 = __task_cred(tsk);
50038+ if (tsk != task && cred2->uid == cred->uid)
50039+ gr_fake_force_sig(SIGKILL, tsk);
50040+ } while_each_thread(tsk2, tsk);
50041+ read_unlock(&tasklist_lock);
50042+ } else {
50043+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50044+ read_lock(&tasklist_lock);
50045+ do_each_thread(tsk2, tsk) {
50046+ if (likely(tsk != task)) {
50047+ curr2 = tsk->acl;
50048+
50049+ if (curr2->device == curr->device &&
50050+ curr2->inode == curr->inode)
50051+ gr_fake_force_sig(SIGKILL, tsk);
50052+ }
50053+ } while_each_thread(tsk2, tsk);
50054+ read_unlock(&tasklist_lock);
50055+ }
50056+ rcu_read_unlock();
50057+ }
50058+
50059+ return;
50060+}
50061+
50062+int
50063+gr_check_crash_exec(const struct file *filp)
50064+{
50065+ struct acl_subject_label *curr;
50066+
50067+ if (unlikely(!gr_acl_is_enabled()))
50068+ return 0;
50069+
50070+ read_lock(&gr_inode_lock);
50071+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50072+ filp->f_path.dentry->d_inode->i_sb->s_dev,
50073+ current->role);
50074+ read_unlock(&gr_inode_lock);
50075+
50076+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50077+ (!curr->crashes && !curr->expires))
50078+ return 0;
50079+
50080+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50081+ time_after(curr->expires, get_seconds()))
50082+ return 1;
50083+ else if (time_before_eq(curr->expires, get_seconds())) {
50084+ curr->crashes = 0;
50085+ curr->expires = 0;
50086+ }
50087+
50088+ return 0;
50089+}
50090+
50091+void
50092+gr_handle_alertkill(struct task_struct *task)
50093+{
50094+ struct acl_subject_label *curracl;
50095+ __u32 curr_ip;
50096+ struct task_struct *p, *p2;
50097+
50098+ if (unlikely(!gr_acl_is_enabled()))
50099+ return;
50100+
50101+ curracl = task->acl;
50102+ curr_ip = task->signal->curr_ip;
50103+
50104+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
50105+ read_lock(&tasklist_lock);
50106+ do_each_thread(p2, p) {
50107+ if (p->signal->curr_ip == curr_ip)
50108+ gr_fake_force_sig(SIGKILL, p);
50109+ } while_each_thread(p2, p);
50110+ read_unlock(&tasklist_lock);
50111+ } else if (curracl->mode & GR_KILLPROC)
50112+ gr_fake_force_sig(SIGKILL, task);
50113+
50114+ return;
50115+}
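
The crash-uid table above is kept sorted by uid: gr_find_uid() is a plain binary search and gr_insert_uid() appends, refreshes the expiry if the uid is already present, and restores order with gr_insertsort()'s insertion sort, so lookups on the brute-force path stay logarithmic. A compact standalone sketch of those two operations over a fixed array; the table size and values are illustrative.

#include <stdio.h>

struct crash_uid { unsigned int uid; unsigned long expires; };

static struct crash_uid set[16];
static int used;

/* Binary search over the sorted array; returns index or -1 (like gr_find_uid). */
static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;
	while (high >= low) {
		int mid = (low + high) / 2;
		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* Append, then insertion-sort back into place (like gr_insert_uid + gr_insertsort). */
static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	if (loc >= 0) {              /* already present: just refresh the expiry */
		set[loc].expires = expires;
		return;
	}
	set[used].uid = uid;
	set[used].expires = expires;
	used++;
	for (int i = 1; i < used; i++) {
		struct crash_uid key = set[i];
		int j = i;
		while (j > 0 && set[j - 1].uid > key.uid) {
			set[j] = set[j - 1];
			j--;
		}
		set[j] = key;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(33, 60);
	insert_uid(500, 120);
	printf("index of uid 500: %d\n", find_uid(500));
	printf("index of uid 42:  %d\n", find_uid(42));
	return 0;
}
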
50116diff -urNp linux-2.6.32.42/grsecurity/gracl_shm.c linux-2.6.32.42/grsecurity/gracl_shm.c
50117--- linux-2.6.32.42/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
50118+++ linux-2.6.32.42/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
50119@@ -0,0 +1,40 @@
50120+#include <linux/kernel.h>
50121+#include <linux/mm.h>
50122+#include <linux/sched.h>
50123+#include <linux/file.h>
50124+#include <linux/ipc.h>
50125+#include <linux/gracl.h>
50126+#include <linux/grsecurity.h>
50127+#include <linux/grinternal.h>
50128+
50129+int
50130+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50131+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50132+{
50133+ struct task_struct *task;
50134+
50135+ if (!gr_acl_is_enabled())
50136+ return 1;
50137+
50138+ rcu_read_lock();
50139+ read_lock(&tasklist_lock);
50140+
50141+ task = find_task_by_vpid(shm_cprid);
50142+
50143+ if (unlikely(!task))
50144+ task = find_task_by_vpid(shm_lapid);
50145+
50146+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
50147+ (task->pid == shm_lapid)) &&
50148+ (task->acl->mode & GR_PROTSHM) &&
50149+ (task->acl != current->acl))) {
50150+ read_unlock(&tasklist_lock);
50151+ rcu_read_unlock();
50152+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
50153+ return 0;
50154+ }
50155+ read_unlock(&tasklist_lock);
50156+ rcu_read_unlock();
50157+
50158+ return 1;
50159+}
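
gr_handle_shmat() above only treats the attach as touching a protected segment when the task found for the creator pid plausibly is the creator, i.e. it started no later than the segment's creation time, or it is the last process to have attached; otherwise the pid has likely been recycled. A one-function sketch of that timing test follows, with made-up epoch values.

#include <stdio.h>
#include <time.h>

/* Mirrors the start-time/last-attach-pid part of the gr_handle_shmat() check. */
static int targets_protected_segment(time_t task_start, time_t shm_createtime,
                                     int task_pid, int shm_lapid)
{
	return (task_start <= shm_createtime) || (task_pid == shm_lapid);
}

int main(void)
{
	printf("%d\n", targets_protected_segment(1000, 2000, 4242, 9999)); /* creator      -> 1 */
	printf("%d\n", targets_protected_segment(3000, 2000, 4242, 9999)); /* recycled pid -> 0 */
	printf("%d\n", targets_protected_segment(3000, 2000, 9999, 9999)); /* last attacher-> 1 */
	return 0;
}
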
50160diff -urNp linux-2.6.32.42/grsecurity/grsec_chdir.c linux-2.6.32.42/grsecurity/grsec_chdir.c
50161--- linux-2.6.32.42/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
50162+++ linux-2.6.32.42/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
50163@@ -0,0 +1,19 @@
50164+#include <linux/kernel.h>
50165+#include <linux/sched.h>
50166+#include <linux/fs.h>
50167+#include <linux/file.h>
50168+#include <linux/grsecurity.h>
50169+#include <linux/grinternal.h>
50170+
50171+void
50172+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
50173+{
50174+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50175+ if ((grsec_enable_chdir && grsec_enable_group &&
50176+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50177+ !grsec_enable_group)) {
50178+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50179+ }
50180+#endif
50181+ return;
50182+}
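
The two-clause condition in gr_log_chdir() above collapses to a single gate: log when chdir auditing is on and either group filtering is off or the caller is in the audit group. A trivial sketch with stand-in booleans for the grsec_enable_* tunables:

#include <stdbool.h>
#include <stdio.h>

/* Equivalent form of the condition guarding gr_log_fs_generic() above. */
static bool should_log_chdir(bool chdir_on, bool group_on, bool in_audit_group)
{
	return chdir_on && (!group_on || in_audit_group);
}

int main(void)
{
	printf("%d\n", should_log_chdir(true, true, false));  /* filtered out -> 0 */
	printf("%d\n", should_log_chdir(true, true, true));   /* in group     -> 1 */
	printf("%d\n", should_log_chdir(true, false, false)); /* no filtering -> 1 */
	return 0;
}
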
50183diff -urNp linux-2.6.32.42/grsecurity/grsec_chroot.c linux-2.6.32.42/grsecurity/grsec_chroot.c
50184--- linux-2.6.32.42/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50185+++ linux-2.6.32.42/grsecurity/grsec_chroot.c 2011-06-20 19:44:00.000000000 -0400
50186@@ -0,0 +1,395 @@
50187+#include <linux/kernel.h>
50188+#include <linux/module.h>
50189+#include <linux/sched.h>
50190+#include <linux/file.h>
50191+#include <linux/fs.h>
50192+#include <linux/mount.h>
50193+#include <linux/types.h>
50194+#include <linux/pid_namespace.h>
50195+#include <linux/grsecurity.h>
50196+#include <linux/grinternal.h>
50197+
50198+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50199+{
50200+#ifdef CONFIG_GRKERNSEC
50201+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50202+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50203+ task->gr_is_chrooted = 1;
50204+ else
50205+ task->gr_is_chrooted = 0;
50206+
50207+ task->gr_chroot_dentry = path->dentry;
50208+#endif
50209+ return;
50210+}
50211+
50212+void gr_clear_chroot_entries(struct task_struct *task)
50213+{
50214+#ifdef CONFIG_GRKERNSEC
50215+ task->gr_is_chrooted = 0;
50216+ task->gr_chroot_dentry = NULL;
50217+#endif
50218+ return;
50219+}
50220+
50221+int
50222+gr_handle_chroot_unix(const pid_t pid)
50223+{
50224+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50225+ struct pid *spid = NULL;
50226+
50227+ if (unlikely(!grsec_enable_chroot_unix))
50228+ return 1;
50229+
50230+ if (likely(!proc_is_chrooted(current)))
50231+ return 1;
50232+
50233+ rcu_read_lock();
50234+ read_lock(&tasklist_lock);
50235+
50236+ spid = find_vpid(pid);
50237+ if (spid) {
50238+ struct task_struct *p;
50239+ p = pid_task(spid, PIDTYPE_PID);
50240+ if (unlikely(p && !have_same_root(current, p))) {
50241+ read_unlock(&tasklist_lock);
50242+ rcu_read_unlock();
50243+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50244+ return 0;
50245+ }
50246+ }
50247+ read_unlock(&tasklist_lock);
50248+ rcu_read_unlock();
50249+#endif
50250+ return 1;
50251+}
50252+
50253+int
50254+gr_handle_chroot_nice(void)
50255+{
50256+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50257+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50258+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50259+ return -EPERM;
50260+ }
50261+#endif
50262+ return 0;
50263+}
50264+
50265+int
50266+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50267+{
50268+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50269+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50270+ && proc_is_chrooted(current)) {
50271+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50272+ return -EACCES;
50273+ }
50274+#endif
50275+ return 0;
50276+}
50277+
50278+int
50279+gr_handle_chroot_rawio(const struct inode *inode)
50280+{
50281+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50282+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50283+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50284+ return 1;
50285+#endif
50286+ return 0;
50287+}
50288+
50289+int
50290+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50291+{
50292+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50293+ struct task_struct *p;
50294+ int ret = 0;
50295+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50296+ return ret;
50297+
50298+ read_lock(&tasklist_lock);
50299+ do_each_pid_task(pid, type, p) {
50300+ if (!have_same_root(current, p)) {
50301+ ret = 1;
50302+ goto out;
50303+ }
50304+ } while_each_pid_task(pid, type, p);
50305+out:
50306+ read_unlock(&tasklist_lock);
50307+ return ret;
50308+#endif
50309+ return 0;
50310+}
50311+
50312+int
50313+gr_pid_is_chrooted(struct task_struct *p)
50314+{
50315+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50316+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50317+ return 0;
50318+
50319+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50320+ !have_same_root(current, p)) {
50321+ return 1;
50322+ }
50323+#endif
50324+ return 0;
50325+}
50326+
50327+EXPORT_SYMBOL(gr_pid_is_chrooted);
50328+
50329+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
50330+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50331+{
50332+ struct dentry *dentry = (struct dentry *)u_dentry;
50333+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
50334+ struct dentry *realroot;
50335+ struct vfsmount *realrootmnt;
50336+ struct dentry *currentroot;
50337+ struct vfsmount *currentmnt;
50338+ struct task_struct *reaper = &init_task;
50339+ int ret = 1;
50340+
50341+ read_lock(&reaper->fs->lock);
50342+ realrootmnt = mntget(reaper->fs->root.mnt);
50343+ realroot = dget(reaper->fs->root.dentry);
50344+ read_unlock(&reaper->fs->lock);
50345+
50346+ read_lock(&current->fs->lock);
50347+ currentmnt = mntget(current->fs->root.mnt);
50348+ currentroot = dget(current->fs->root.dentry);
50349+ read_unlock(&current->fs->lock);
50350+
50351+ spin_lock(&dcache_lock);
50352+ for (;;) {
50353+ if (unlikely((dentry == realroot && mnt == realrootmnt)
50354+ || (dentry == currentroot && mnt == currentmnt)))
50355+ break;
50356+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
50357+ if (mnt->mnt_parent == mnt)
50358+ break;
50359+ dentry = mnt->mnt_mountpoint;
50360+ mnt = mnt->mnt_parent;
50361+ continue;
50362+ }
50363+ dentry = dentry->d_parent;
50364+ }
50365+ spin_unlock(&dcache_lock);
50366+
50367+ dput(currentroot);
50368+ mntput(currentmnt);
50369+
50370+ /* access is outside of chroot */
50371+ if (dentry == realroot && mnt == realrootmnt)
50372+ ret = 0;
50373+
50374+ dput(realroot);
50375+ mntput(realrootmnt);
50376+ return ret;
50377+}
50378+#endif
50379+
50380+int
50381+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50382+{
50383+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50384+ if (!grsec_enable_chroot_fchdir)
50385+ return 1;
50386+
50387+ if (!proc_is_chrooted(current))
50388+ return 1;
50389+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50390+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50391+ return 0;
50392+ }
50393+#endif
50394+ return 1;
50395+}
50396+
50397+int
50398+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50399+ const time_t shm_createtime)
50400+{
50401+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50402+ struct pid *pid = NULL;
50403+ time_t starttime;
50404+
50405+ if (unlikely(!grsec_enable_chroot_shmat))
50406+ return 1;
50407+
50408+ if (likely(!proc_is_chrooted(current)))
50409+ return 1;
50410+
50411+ rcu_read_lock();
50412+ read_lock(&tasklist_lock);
50413+
50414+ pid = find_vpid(shm_cprid);
50415+ if (pid) {
50416+ struct task_struct *p;
50417+ p = pid_task(pid, PIDTYPE_PID);
50418+ if (p == NULL)
50419+ goto unlock;
50420+ starttime = p->start_time.tv_sec;
50421+ if (unlikely(!have_same_root(current, p) &&
50422+ time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
50423+ read_unlock(&tasklist_lock);
50424+ rcu_read_unlock();
50425+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50426+ return 0;
50427+ }
50428+ } else {
50429+ pid = find_vpid(shm_lapid);
50430+ if (pid) {
50431+ struct task_struct *p;
50432+ p = pid_task(pid, PIDTYPE_PID);
50433+ if (p == NULL)
50434+ goto unlock;
50435+ if (unlikely(!have_same_root(current, p))) {
50436+ read_unlock(&tasklist_lock);
50437+ rcu_read_unlock();
50438+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50439+ return 0;
50440+ }
50441+ }
50442+ }
50443+
50444+unlock:
50445+ read_unlock(&tasklist_lock);
50446+ rcu_read_unlock();
50447+#endif
50448+ return 1;
50449+}
50450+
50451+void
50452+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50453+{
50454+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50455+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50456+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50457+#endif
50458+ return;
50459+}
50460+
50461+int
50462+gr_handle_chroot_mknod(const struct dentry *dentry,
50463+ const struct vfsmount *mnt, const int mode)
50464+{
50465+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50466+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50467+ proc_is_chrooted(current)) {
50468+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50469+ return -EPERM;
50470+ }
50471+#endif
50472+ return 0;
50473+}
50474+
50475+int
50476+gr_handle_chroot_mount(const struct dentry *dentry,
50477+ const struct vfsmount *mnt, const char *dev_name)
50478+{
50479+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50480+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50481+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
50482+ return -EPERM;
50483+ }
50484+#endif
50485+ return 0;
50486+}
50487+
50488+int
50489+gr_handle_chroot_pivot(void)
50490+{
50491+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50492+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50493+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50494+ return -EPERM;
50495+ }
50496+#endif
50497+ return 0;
50498+}
50499+
50500+int
50501+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50502+{
50503+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50504+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50505+ !gr_is_outside_chroot(dentry, mnt)) {
50506+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50507+ return -EPERM;
50508+ }
50509+#endif
50510+ return 0;
50511+}
50512+
50513+int
50514+gr_handle_chroot_caps(struct path *path)
50515+{
50516+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50517+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50518+ (init_task.fs->root.dentry != path->dentry) &&
50519+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50520+
50521+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50522+ const struct cred *old = current_cred();
50523+ struct cred *new = prepare_creds();
50524+ if (new == NULL)
50525+ return 1;
50526+
50527+ new->cap_permitted = cap_drop(old->cap_permitted,
50528+ chroot_caps);
50529+ new->cap_inheritable = cap_drop(old->cap_inheritable,
50530+ chroot_caps);
50531+ new->cap_effective = cap_drop(old->cap_effective,
50532+ chroot_caps);
50533+
50534+ commit_creds(new);
50535+
50536+ return 0;
50537+ }
50538+#endif
50539+ return 0;
50540+}
50541+
50542+int
50543+gr_handle_chroot_sysctl(const int op)
50544+{
50545+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50546+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
50547+ && (op & MAY_WRITE))
50548+ return -EACCES;
50549+#endif
50550+ return 0;
50551+}
50552+
50553+void
50554+gr_handle_chroot_chdir(struct path *path)
50555+{
50556+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50557+ if (grsec_enable_chroot_chdir)
50558+ set_fs_pwd(current->fs, path);
50559+#endif
50560+ return;
50561+}
50562+
50563+int
50564+gr_handle_chroot_chmod(const struct dentry *dentry,
50565+ const struct vfsmount *mnt, const int mode)
50566+{
50567+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50568+ /* allow chmod +s on directories, but not on files */
50569+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50570+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50571+ proc_is_chrooted(current)) {
50572+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50573+ return -EPERM;
50574+ }
50575+#endif
50576+ return 0;
50577+}
50578+
50579+#ifdef CONFIG_SECURITY
50580+EXPORT_SYMBOL(gr_handle_chroot_caps);
50581+#endif
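
gr_handle_chroot_chmod() above refuses, inside a chroot, a chmod on anything but a directory that would grant setuid, or setgid combined with group-execute; setgid without group-execute denotes mandatory locking rather than privilege, so it is allowed through. A small sketch of just that mode test, with illustrative octal modes:

#include <stdio.h>
#include <sys/stat.h>

/* Mirrors the mode test in gr_handle_chroot_chmod(): setuid, or setgid
   together with group-execute, is what counts as adding privilege. */
static int mode_adds_privilege(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", mode_adds_privilege(04755)); /* setuid            -> 1 */
	printf("%d\n", mode_adds_privilege(02755)); /* setgid + g+x      -> 1 */
	printf("%d\n", mode_adds_privilege(02644)); /* mandatory locking -> 0 */
	printf("%d\n", mode_adds_privilege(00755)); /* plain exec        -> 0 */
	return 0;
}
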
50582diff -urNp linux-2.6.32.42/grsecurity/grsec_disabled.c linux-2.6.32.42/grsecurity/grsec_disabled.c
50583--- linux-2.6.32.42/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50584+++ linux-2.6.32.42/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
50585@@ -0,0 +1,447 @@
50586+#include <linux/kernel.h>
50587+#include <linux/module.h>
50588+#include <linux/sched.h>
50589+#include <linux/file.h>
50590+#include <linux/fs.h>
50591+#include <linux/kdev_t.h>
50592+#include <linux/net.h>
50593+#include <linux/in.h>
50594+#include <linux/ip.h>
50595+#include <linux/skbuff.h>
50596+#include <linux/sysctl.h>
50597+
50598+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50599+void
50600+pax_set_initial_flags(struct linux_binprm *bprm)
50601+{
50602+ return;
50603+}
50604+#endif
50605+
50606+#ifdef CONFIG_SYSCTL
50607+__u32
50608+gr_handle_sysctl(const struct ctl_table * table, const int op)
50609+{
50610+ return 0;
50611+}
50612+#endif
50613+
50614+#ifdef CONFIG_TASKSTATS
50615+int gr_is_taskstats_denied(int pid)
50616+{
50617+ return 0;
50618+}
50619+#endif
50620+
50621+int
50622+gr_acl_is_enabled(void)
50623+{
50624+ return 0;
50625+}
50626+
50627+int
50628+gr_handle_rawio(const struct inode *inode)
50629+{
50630+ return 0;
50631+}
50632+
50633+void
50634+gr_acl_handle_psacct(struct task_struct *task, const long code)
50635+{
50636+ return;
50637+}
50638+
50639+int
50640+gr_handle_ptrace(struct task_struct *task, const long request)
50641+{
50642+ return 0;
50643+}
50644+
50645+int
50646+gr_handle_proc_ptrace(struct task_struct *task)
50647+{
50648+ return 0;
50649+}
50650+
50651+void
50652+gr_learn_resource(const struct task_struct *task,
50653+ const int res, const unsigned long wanted, const int gt)
50654+{
50655+ return;
50656+}
50657+
50658+int
50659+gr_set_acls(const int type)
50660+{
50661+ return 0;
50662+}
50663+
50664+int
50665+gr_check_hidden_task(const struct task_struct *tsk)
50666+{
50667+ return 0;
50668+}
50669+
50670+int
50671+gr_check_protected_task(const struct task_struct *task)
50672+{
50673+ return 0;
50674+}
50675+
50676+int
50677+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50678+{
50679+ return 0;
50680+}
50681+
50682+void
50683+gr_copy_label(struct task_struct *tsk)
50684+{
50685+ return;
50686+}
50687+
50688+void
50689+gr_set_pax_flags(struct task_struct *task)
50690+{
50691+ return;
50692+}
50693+
50694+int
50695+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50696+ const int unsafe_share)
50697+{
50698+ return 0;
50699+}
50700+
50701+void
50702+gr_handle_delete(const ino_t ino, const dev_t dev)
50703+{
50704+ return;
50705+}
50706+
50707+void
50708+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50709+{
50710+ return;
50711+}
50712+
50713+void
50714+gr_handle_crash(struct task_struct *task, const int sig)
50715+{
50716+ return;
50717+}
50718+
50719+int
50720+gr_check_crash_exec(const struct file *filp)
50721+{
50722+ return 0;
50723+}
50724+
50725+int
50726+gr_check_crash_uid(const uid_t uid)
50727+{
50728+ return 0;
50729+}
50730+
50731+void
50732+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50733+ struct dentry *old_dentry,
50734+ struct dentry *new_dentry,
50735+ struct vfsmount *mnt, const __u8 replace)
50736+{
50737+ return;
50738+}
50739+
50740+int
50741+gr_search_socket(const int family, const int type, const int protocol)
50742+{
50743+ return 1;
50744+}
50745+
50746+int
50747+gr_search_connectbind(const int mode, const struct socket *sock,
50748+ const struct sockaddr_in *addr)
50749+{
50750+ return 0;
50751+}
50752+
50753+int
50754+gr_is_capable(const int cap)
50755+{
50756+ return 1;
50757+}
50758+
50759+int
50760+gr_is_capable_nolog(const int cap)
50761+{
50762+ return 1;
50763+}
50764+
50765+void
50766+gr_handle_alertkill(struct task_struct *task)
50767+{
50768+ return;
50769+}
50770+
50771+__u32
50772+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50773+{
50774+ return 1;
50775+}
50776+
50777+__u32
50778+gr_acl_handle_hidden_file(const struct dentry * dentry,
50779+ const struct vfsmount * mnt)
50780+{
50781+ return 1;
50782+}
50783+
50784+__u32
50785+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50786+ const int fmode)
50787+{
50788+ return 1;
50789+}
50790+
50791+__u32
50792+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50793+{
50794+ return 1;
50795+}
50796+
50797+__u32
50798+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50799+{
50800+ return 1;
50801+}
50802+
50803+int
50804+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50805+ unsigned int *vm_flags)
50806+{
50807+ return 1;
50808+}
50809+
50810+__u32
50811+gr_acl_handle_truncate(const struct dentry * dentry,
50812+ const struct vfsmount * mnt)
50813+{
50814+ return 1;
50815+}
50816+
50817+__u32
50818+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50819+{
50820+ return 1;
50821+}
50822+
50823+__u32
50824+gr_acl_handle_access(const struct dentry * dentry,
50825+ const struct vfsmount * mnt, const int fmode)
50826+{
50827+ return 1;
50828+}
50829+
50830+__u32
50831+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50832+ mode_t mode)
50833+{
50834+ return 1;
50835+}
50836+
50837+__u32
50838+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50839+ mode_t mode)
50840+{
50841+ return 1;
50842+}
50843+
50844+__u32
50845+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50846+{
50847+ return 1;
50848+}
50849+
50850+__u32
50851+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50852+{
50853+ return 1;
50854+}
50855+
50856+void
50857+grsecurity_init(void)
50858+{
50859+ return;
50860+}
50861+
50862+__u32
50863+gr_acl_handle_mknod(const struct dentry * new_dentry,
50864+ const struct dentry * parent_dentry,
50865+ const struct vfsmount * parent_mnt,
50866+ const int mode)
50867+{
50868+ return 1;
50869+}
50870+
50871+__u32
50872+gr_acl_handle_mkdir(const struct dentry * new_dentry,
50873+ const struct dentry * parent_dentry,
50874+ const struct vfsmount * parent_mnt)
50875+{
50876+ return 1;
50877+}
50878+
50879+__u32
50880+gr_acl_handle_symlink(const struct dentry * new_dentry,
50881+ const struct dentry * parent_dentry,
50882+ const struct vfsmount * parent_mnt, const char *from)
50883+{
50884+ return 1;
50885+}
50886+
50887+__u32
50888+gr_acl_handle_link(const struct dentry * new_dentry,
50889+ const struct dentry * parent_dentry,
50890+ const struct vfsmount * parent_mnt,
50891+ const struct dentry * old_dentry,
50892+ const struct vfsmount * old_mnt, const char *to)
50893+{
50894+ return 1;
50895+}
50896+
50897+int
50898+gr_acl_handle_rename(const struct dentry *new_dentry,
50899+ const struct dentry *parent_dentry,
50900+ const struct vfsmount *parent_mnt,
50901+ const struct dentry *old_dentry,
50902+ const struct inode *old_parent_inode,
50903+ const struct vfsmount *old_mnt, const char *newname)
50904+{
50905+ return 0;
50906+}
50907+
50908+int
50909+gr_acl_handle_filldir(const struct file *file, const char *name,
50910+ const int namelen, const ino_t ino)
50911+{
50912+ return 1;
50913+}
50914+
50915+int
50916+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50917+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50918+{
50919+ return 1;
50920+}
50921+
50922+int
50923+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50924+{
50925+ return 0;
50926+}
50927+
50928+int
50929+gr_search_accept(const struct socket *sock)
50930+{
50931+ return 0;
50932+}
50933+
50934+int
50935+gr_search_listen(const struct socket *sock)
50936+{
50937+ return 0;
50938+}
50939+
50940+int
50941+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50942+{
50943+ return 0;
50944+}
50945+
50946+__u32
50947+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50948+{
50949+ return 1;
50950+}
50951+
50952+__u32
50953+gr_acl_handle_creat(const struct dentry * dentry,
50954+ const struct dentry * p_dentry,
50955+ const struct vfsmount * p_mnt, const int fmode,
50956+ const int imode)
50957+{
50958+ return 1;
50959+}
50960+
50961+void
50962+gr_acl_handle_exit(void)
50963+{
50964+ return;
50965+}
50966+
50967+int
50968+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50969+{
50970+ return 1;
50971+}
50972+
50973+void
50974+gr_set_role_label(const uid_t uid, const gid_t gid)
50975+{
50976+ return;
50977+}
50978+
50979+int
50980+gr_acl_handle_procpidmem(const struct task_struct *task)
50981+{
50982+ return 0;
50983+}
50984+
50985+int
50986+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50987+{
50988+ return 0;
50989+}
50990+
50991+int
50992+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50993+{
50994+ return 0;
50995+}
50996+
50997+void
50998+gr_set_kernel_label(struct task_struct *task)
50999+{
51000+ return;
51001+}
51002+
51003+int
51004+gr_check_user_change(int real, int effective, int fs)
51005+{
51006+ return 0;
51007+}
51008+
51009+int
51010+gr_check_group_change(int real, int effective, int fs)
51011+{
51012+ return 0;
51013+}
51014+
51015+int gr_acl_enable_at_secure(void)
51016+{
51017+ return 0;
51018+}
51019+
51020+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51021+{
51022+ return dentry->d_inode->i_sb->s_dev;
51023+}
51024+
51025+EXPORT_SYMBOL(gr_is_capable);
51026+EXPORT_SYMBOL(gr_is_capable_nolog);
51027+EXPORT_SYMBOL(gr_learn_resource);
51028+EXPORT_SYMBOL(gr_set_kernel_label);
51029+#ifdef CONFIG_SECURITY
51030+EXPORT_SYMBOL(gr_check_user_change);
51031+EXPORT_SYMBOL(gr_check_group_change);
51032+#endif
51033diff -urNp linux-2.6.32.42/grsecurity/grsec_exec.c linux-2.6.32.42/grsecurity/grsec_exec.c
51034--- linux-2.6.32.42/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51035+++ linux-2.6.32.42/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
51036@@ -0,0 +1,148 @@
51037+#include <linux/kernel.h>
51038+#include <linux/sched.h>
51039+#include <linux/file.h>
51040+#include <linux/binfmts.h>
51041+#include <linux/smp_lock.h>
51042+#include <linux/fs.h>
51043+#include <linux/types.h>
51044+#include <linux/grdefs.h>
51045+#include <linux/grinternal.h>
51046+#include <linux/capability.h>
51047+#include <linux/compat.h>
51048+
51049+#include <asm/uaccess.h>
51050+
51051+#ifdef CONFIG_GRKERNSEC_EXECLOG
51052+static char gr_exec_arg_buf[132];
51053+static DEFINE_MUTEX(gr_exec_arg_mutex);
51054+#endif
51055+
51056+int
51057+gr_handle_nproc(void)
51058+{
51059+#ifdef CONFIG_GRKERNSEC_EXECVE
51060+ const struct cred *cred = current_cred();
51061+ if (grsec_enable_execve && cred->user &&
51062+ (atomic_read(&cred->user->processes) >
51063+ current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
51064+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
51065+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
51066+ return -EAGAIN;
51067+ }
51068+#endif
51069+ return 0;
51070+}
51071+
51072+void
51073+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
51074+{
51075+#ifdef CONFIG_GRKERNSEC_EXECLOG
51076+ char *grarg = gr_exec_arg_buf;
51077+ unsigned int i, x, execlen = 0;
51078+ char c;
51079+
51080+ if (!((grsec_enable_execlog && grsec_enable_group &&
51081+ in_group_p(grsec_audit_gid))
51082+ || (grsec_enable_execlog && !grsec_enable_group)))
51083+ return;
51084+
51085+ mutex_lock(&gr_exec_arg_mutex);
51086+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51087+
51088+ if (unlikely(argv == NULL))
51089+ goto log;
51090+
51091+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51092+ const char __user *p;
51093+ unsigned int len;
51094+
51095+ if (copy_from_user(&p, argv + i, sizeof(p)))
51096+ goto log;
51097+ if (!p)
51098+ goto log;
51099+ len = strnlen_user(p, 128 - execlen);
51100+ if (len > 128 - execlen)
51101+ len = 128 - execlen;
51102+ else if (len > 0)
51103+ len--;
51104+ if (copy_from_user(grarg + execlen, p, len))
51105+ goto log;
51106+
51107+ /* rewrite unprintable characters */
51108+ for (x = 0; x < len; x++) {
51109+ c = *(grarg + execlen + x);
51110+ if (c < 32 || c > 126)
51111+ *(grarg + execlen + x) = ' ';
51112+ }
51113+
51114+ execlen += len;
51115+ *(grarg + execlen) = ' ';
51116+ *(grarg + execlen + 1) = '\0';
51117+ execlen++;
51118+ }
51119+
51120+ log:
51121+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51122+ bprm->file->f_path.mnt, grarg);
51123+ mutex_unlock(&gr_exec_arg_mutex);
51124+#endif
51125+ return;
51126+}
51127+
51128+#ifdef CONFIG_COMPAT
51129+void
51130+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
51131+{
51132+#ifdef CONFIG_GRKERNSEC_EXECLOG
51133+ char *grarg = gr_exec_arg_buf;
51134+ unsigned int i, x, execlen = 0;
51135+ char c;
51136+
51137+ if (!((grsec_enable_execlog && grsec_enable_group &&
51138+ in_group_p(grsec_audit_gid))
51139+ || (grsec_enable_execlog && !grsec_enable_group)))
51140+ return;
51141+
51142+ mutex_lock(&gr_exec_arg_mutex);
51143+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51144+
51145+ if (unlikely(argv == NULL))
51146+ goto log;
51147+
51148+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51149+ compat_uptr_t p;
51150+ unsigned int len;
51151+
51152+ if (get_user(p, argv + i))
51153+ goto log;
51154+ len = strnlen_user(compat_ptr(p), 128 - execlen);
51155+ if (len > 128 - execlen)
51156+ len = 128 - execlen;
51157+ else if (len > 0)
51158+ len--;
51159+ else
51160+ goto log;
51161+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
51162+ goto log;
51163+
51164+ /* rewrite unprintable characters */
51165+ for (x = 0; x < len; x++) {
51166+ c = *(grarg + execlen + x);
51167+ if (c < 32 || c > 126)
51168+ *(grarg + execlen + x) = ' ';
51169+ }
51170+
51171+ execlen += len;
51172+ *(grarg + execlen) = ' ';
51173+ *(grarg + execlen + 1) = '\0';
51174+ execlen++;
51175+ }
51176+
51177+ log:
51178+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51179+ bprm->file->f_path.mnt, grarg);
51180+ mutex_unlock(&gr_exec_arg_mutex);
51181+#endif
51182+ return;
51183+}
51184+#endif
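
The two exec-logging paths above share the same sanitization: at most 128 bytes of argument data are copied into a 132-byte static buffer, each argument is truncated to the space left, unprintable bytes are rewritten to spaces, and arguments are joined with single spaces. A minimal standalone sketch of that flattening, in plain userspace C with a hypothetical name (flatten_args) and no kernel dependencies, shows roughly what ends up in the log string:

#include <stdio.h>
#include <string.h>

/* Mirror of the in-kernel limits: 128 bytes of argument data plus
 * room for the separating spaces and the trailing NUL. */
#define ARG_DATA_MAX 128
#define ARG_BUF_LEN  132

/* Hypothetical helper: flatten argv into buf the way
 * gr_handle_exec_args() does (truncate at 128 bytes, map
 * non-printable characters to spaces, separate with spaces). */
static void flatten_args(char *buf, char *const argv[])
{
	unsigned int execlen = 0, x;

	memset(buf, 0, ARG_BUF_LEN);
	for (unsigned int i = 0; argv[i] && execlen < ARG_DATA_MAX; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > ARG_DATA_MAX - execlen)
			len = ARG_DATA_MAX - execlen;
		memcpy(buf + execlen, argv[i], len);

		/* rewrite unprintable characters, as the kernel code does */
		for (x = 0; x < len; x++) {
			char c = buf[execlen + x];
			if (c < 32 || c > 126)
				buf[execlen + x] = ' ';
		}

		execlen += len;
		buf[execlen] = ' ';
		buf[execlen + 1] = '\0';
		execlen++;
	}
}

int main(void)
{
	char buf[ARG_BUF_LEN];
	char *const argv[] = { "cat", "/etc/pass\twd", NULL };

	flatten_args(buf, argv);
	printf("logged as: %s\n", buf);	/* the tab is shown as a space */
	return 0;
}
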
51185diff -urNp linux-2.6.32.42/grsecurity/grsec_fifo.c linux-2.6.32.42/grsecurity/grsec_fifo.c
51186--- linux-2.6.32.42/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51187+++ linux-2.6.32.42/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
51188@@ -0,0 +1,24 @@
51189+#include <linux/kernel.h>
51190+#include <linux/sched.h>
51191+#include <linux/fs.h>
51192+#include <linux/file.h>
51193+#include <linux/grinternal.h>
51194+
51195+int
51196+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51197+ const struct dentry *dir, const int flag, const int acc_mode)
51198+{
51199+#ifdef CONFIG_GRKERNSEC_FIFO
51200+ const struct cred *cred = current_cred();
51201+
51202+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51203+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51204+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51205+ (cred->fsuid != dentry->d_inode->i_uid)) {
51206+ if (!inode_permission(dentry->d_inode, acc_mode))
51207+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51208+ return -EACCES;
51209+ }
51210+#endif
51211+ return 0;
51212+}
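
gr_handle_fifo() above denies opening a FIFO that sits in a sticky directory when the FIFO is owned by neither the directory owner nor the opener (unless O_EXCL is passed), returning -EACCES and logging only when ordinary permissions would otherwise have allowed the open. A hedged restatement of just the predicate as a standalone userspace function (hypothetical name fifo_open_denied, with the logging and permission check left out):

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Same ownership and mode tests as the kernel check above. */
static bool fifo_open_denied(mode_t fifo_mode, uid_t fifo_uid,
			     mode_t dir_mode, uid_t dir_uid,
			     uid_t opener_fsuid, int open_flags)
{
	return S_ISFIFO(fifo_mode) &&
	       !(open_flags & O_EXCL) &&
	       (dir_mode & S_ISVTX) &&
	       fifo_uid != dir_uid &&
	       opener_fsuid != fifo_uid;
}

int main(void)
{
	/* FIFO owned by uid 1001 in a /tmp-style sticky directory
	 * (owned by root), opened read-only by uid 1000. */
	bool denied = fifo_open_denied(S_IFIFO | 0666, 1001,
				       S_IFDIR | 01777, 0,
				       1000, O_RDONLY);

	printf("open denied: %s\n", denied ? "yes" : "no");
	return 0;
}
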
51213diff -urNp linux-2.6.32.42/grsecurity/grsec_fork.c linux-2.6.32.42/grsecurity/grsec_fork.c
51214--- linux-2.6.32.42/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51215+++ linux-2.6.32.42/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
51216@@ -0,0 +1,23 @@
51217+#include <linux/kernel.h>
51218+#include <linux/sched.h>
51219+#include <linux/grsecurity.h>
51220+#include <linux/grinternal.h>
51221+#include <linux/errno.h>
51222+
51223+void
51224+gr_log_forkfail(const int retval)
51225+{
51226+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51227+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51228+ switch (retval) {
51229+ case -EAGAIN:
51230+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51231+ break;
51232+ case -ENOMEM:
51233+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51234+ break;
51235+ }
51236+ }
51237+#endif
51238+ return;
51239+}
51240diff -urNp linux-2.6.32.42/grsecurity/grsec_init.c linux-2.6.32.42/grsecurity/grsec_init.c
51241--- linux-2.6.32.42/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51242+++ linux-2.6.32.42/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
51243@@ -0,0 +1,274 @@
51244+#include <linux/kernel.h>
51245+#include <linux/sched.h>
51246+#include <linux/mm.h>
51247+#include <linux/smp_lock.h>
51248+#include <linux/gracl.h>
51249+#include <linux/slab.h>
51250+#include <linux/vmalloc.h>
51251+#include <linux/percpu.h>
51252+#include <linux/module.h>
51253+
51254+int grsec_enable_brute;
51255+int grsec_enable_link;
51256+int grsec_enable_dmesg;
51257+int grsec_enable_harden_ptrace;
51258+int grsec_enable_fifo;
51259+int grsec_enable_execve;
51260+int grsec_enable_execlog;
51261+int grsec_enable_signal;
51262+int grsec_enable_forkfail;
51263+int grsec_enable_audit_ptrace;
51264+int grsec_enable_time;
51265+int grsec_enable_audit_textrel;
51266+int grsec_enable_group;
51267+int grsec_audit_gid;
51268+int grsec_enable_chdir;
51269+int grsec_enable_mount;
51270+int grsec_enable_rofs;
51271+int grsec_enable_chroot_findtask;
51272+int grsec_enable_chroot_mount;
51273+int grsec_enable_chroot_shmat;
51274+int grsec_enable_chroot_fchdir;
51275+int grsec_enable_chroot_double;
51276+int grsec_enable_chroot_pivot;
51277+int grsec_enable_chroot_chdir;
51278+int grsec_enable_chroot_chmod;
51279+int grsec_enable_chroot_mknod;
51280+int grsec_enable_chroot_nice;
51281+int grsec_enable_chroot_execlog;
51282+int grsec_enable_chroot_caps;
51283+int grsec_enable_chroot_sysctl;
51284+int grsec_enable_chroot_unix;
51285+int grsec_enable_tpe;
51286+int grsec_tpe_gid;
51287+int grsec_enable_blackhole;
51288+#ifdef CONFIG_IPV6_MODULE
51289+EXPORT_SYMBOL(grsec_enable_blackhole);
51290+#endif
51291+int grsec_lastack_retries;
51292+int grsec_enable_tpe_all;
51293+int grsec_enable_tpe_invert;
51294+int grsec_enable_socket_all;
51295+int grsec_socket_all_gid;
51296+int grsec_enable_socket_client;
51297+int grsec_socket_client_gid;
51298+int grsec_enable_socket_server;
51299+int grsec_socket_server_gid;
51300+int grsec_resource_logging;
51301+int grsec_disable_privio;
51302+int grsec_enable_log_rwxmaps;
51303+int grsec_lock;
51304+
51305+DEFINE_SPINLOCK(grsec_alert_lock);
51306+unsigned long grsec_alert_wtime = 0;
51307+unsigned long grsec_alert_fyet = 0;
51308+
51309+DEFINE_SPINLOCK(grsec_audit_lock);
51310+
51311+DEFINE_RWLOCK(grsec_exec_file_lock);
51312+
51313+char *gr_shared_page[4];
51314+
51315+char *gr_alert_log_fmt;
51316+char *gr_audit_log_fmt;
51317+char *gr_alert_log_buf;
51318+char *gr_audit_log_buf;
51319+
51320+extern struct gr_arg *gr_usermode;
51321+extern unsigned char *gr_system_salt;
51322+extern unsigned char *gr_system_sum;
51323+
51324+void __init
51325+grsecurity_init(void)
51326+{
51327+ int j;
51328+ /* create the per-cpu shared pages */
51329+
51330+#ifdef CONFIG_X86
51331+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51332+#endif
51333+
51334+ for (j = 0; j < 4; j++) {
51335+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51336+ if (gr_shared_page[j] == NULL) {
51337+ panic("Unable to allocate grsecurity shared page");
51338+ return;
51339+ }
51340+ }
51341+
51342+ /* allocate log buffers */
51343+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51344+ if (!gr_alert_log_fmt) {
51345+ panic("Unable to allocate grsecurity alert log format buffer");
51346+ return;
51347+ }
51348+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51349+ if (!gr_audit_log_fmt) {
51350+ panic("Unable to allocate grsecurity audit log format buffer");
51351+ return;
51352+ }
51353+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51354+ if (!gr_alert_log_buf) {
51355+ panic("Unable to allocate grsecurity alert log buffer");
51356+ return;
51357+ }
51358+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51359+ if (!gr_audit_log_buf) {
51360+ panic("Unable to allocate grsecurity audit log buffer");
51361+ return;
51362+ }
51363+
51364+ /* allocate memory for authentication structure */
51365+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51366+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51367+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51368+
51369+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51370+ panic("Unable to allocate grsecurity authentication structure");
51371+ return;
51372+ }
51373+
51374+
51375+#ifdef CONFIG_GRKERNSEC_IO
51376+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51377+ grsec_disable_privio = 1;
51378+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51379+ grsec_disable_privio = 1;
51380+#else
51381+ grsec_disable_privio = 0;
51382+#endif
51383+#endif
51384+
51385+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51386+	/* for backward compatibility, tpe_invert always defaults to on
51387+	   when CONFIG_GRKERNSEC_TPE_INVERT is enabled in the kernel
51388+	*/
51389+ grsec_enable_tpe_invert = 1;
51390+#endif
51391+
51392+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51393+#ifndef CONFIG_GRKERNSEC_SYSCTL
51394+ grsec_lock = 1;
51395+#endif
51396+
51397+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51398+ grsec_enable_audit_textrel = 1;
51399+#endif
51400+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51401+ grsec_enable_log_rwxmaps = 1;
51402+#endif
51403+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51404+ grsec_enable_group = 1;
51405+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51406+#endif
51407+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51408+ grsec_enable_chdir = 1;
51409+#endif
51410+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51411+ grsec_enable_harden_ptrace = 1;
51412+#endif
51413+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51414+ grsec_enable_mount = 1;
51415+#endif
51416+#ifdef CONFIG_GRKERNSEC_LINK
51417+ grsec_enable_link = 1;
51418+#endif
51419+#ifdef CONFIG_GRKERNSEC_BRUTE
51420+ grsec_enable_brute = 1;
51421+#endif
51422+#ifdef CONFIG_GRKERNSEC_DMESG
51423+ grsec_enable_dmesg = 1;
51424+#endif
51425+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51426+ grsec_enable_blackhole = 1;
51427+ grsec_lastack_retries = 4;
51428+#endif
51429+#ifdef CONFIG_GRKERNSEC_FIFO
51430+ grsec_enable_fifo = 1;
51431+#endif
51432+#ifdef CONFIG_GRKERNSEC_EXECVE
51433+ grsec_enable_execve = 1;
51434+#endif
51435+#ifdef CONFIG_GRKERNSEC_EXECLOG
51436+ grsec_enable_execlog = 1;
51437+#endif
51438+#ifdef CONFIG_GRKERNSEC_SIGNAL
51439+ grsec_enable_signal = 1;
51440+#endif
51441+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51442+ grsec_enable_forkfail = 1;
51443+#endif
51444+#ifdef CONFIG_GRKERNSEC_TIME
51445+ grsec_enable_time = 1;
51446+#endif
51447+#ifdef CONFIG_GRKERNSEC_RESLOG
51448+ grsec_resource_logging = 1;
51449+#endif
51450+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51451+ grsec_enable_chroot_findtask = 1;
51452+#endif
51453+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51454+ grsec_enable_chroot_unix = 1;
51455+#endif
51456+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51457+ grsec_enable_chroot_mount = 1;
51458+#endif
51459+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51460+ grsec_enable_chroot_fchdir = 1;
51461+#endif
51462+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51463+ grsec_enable_chroot_shmat = 1;
51464+#endif
51465+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51466+ grsec_enable_audit_ptrace = 1;
51467+#endif
51468+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51469+ grsec_enable_chroot_double = 1;
51470+#endif
51471+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51472+ grsec_enable_chroot_pivot = 1;
51473+#endif
51474+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51475+ grsec_enable_chroot_chdir = 1;
51476+#endif
51477+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51478+ grsec_enable_chroot_chmod = 1;
51479+#endif
51480+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51481+ grsec_enable_chroot_mknod = 1;
51482+#endif
51483+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51484+ grsec_enable_chroot_nice = 1;
51485+#endif
51486+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51487+ grsec_enable_chroot_execlog = 1;
51488+#endif
51489+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51490+ grsec_enable_chroot_caps = 1;
51491+#endif
51492+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51493+ grsec_enable_chroot_sysctl = 1;
51494+#endif
51495+#ifdef CONFIG_GRKERNSEC_TPE
51496+ grsec_enable_tpe = 1;
51497+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51498+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51499+ grsec_enable_tpe_all = 1;
51500+#endif
51501+#endif
51502+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51503+ grsec_enable_socket_all = 1;
51504+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51505+#endif
51506+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51507+ grsec_enable_socket_client = 1;
51508+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51509+#endif
51510+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51511+ grsec_enable_socket_server = 1;
51512+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51513+#endif
51514+#endif
51515+
51516+ return;
51517+}
51518diff -urNp linux-2.6.32.42/grsecurity/grsec_link.c linux-2.6.32.42/grsecurity/grsec_link.c
51519--- linux-2.6.32.42/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51520+++ linux-2.6.32.42/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
51521@@ -0,0 +1,43 @@
51522+#include <linux/kernel.h>
51523+#include <linux/sched.h>
51524+#include <linux/fs.h>
51525+#include <linux/file.h>
51526+#include <linux/grinternal.h>
51527+
51528+int
51529+gr_handle_follow_link(const struct inode *parent,
51530+ const struct inode *inode,
51531+ const struct dentry *dentry, const struct vfsmount *mnt)
51532+{
51533+#ifdef CONFIG_GRKERNSEC_LINK
51534+ const struct cred *cred = current_cred();
51535+
51536+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51537+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51538+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51539+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51540+ return -EACCES;
51541+ }
51542+#endif
51543+ return 0;
51544+}
51545+
51546+int
51547+gr_handle_hardlink(const struct dentry *dentry,
51548+ const struct vfsmount *mnt,
51549+ struct inode *inode, const int mode, const char *to)
51550+{
51551+#ifdef CONFIG_GRKERNSEC_LINK
51552+ const struct cred *cred = current_cred();
51553+
51554+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51555+ (!S_ISREG(mode) || (mode & S_ISUID) ||
51556+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51557+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51558+ !capable(CAP_FOWNER) && cred->uid) {
51559+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51560+ return -EPERM;
51561+ }
51562+#endif
51563+ return 0;
51564+}
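
gr_handle_follow_link() above implements the classic /tmp hardening: a symlink that lives in a sticky, world-writable directory may only be followed if it is owned by the directory owner or by the process following it. A small standalone sketch of that predicate (hypothetical name follow_link_denied), using the same mode and ownership tests:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Deny following a symlink planted in a sticky, world-writable
 * directory by someone other than the directory owner, when the
 * follower does not own the link either. */
static bool follow_link_denied(mode_t dir_mode, uid_t dir_uid,
			       mode_t link_mode, uid_t link_uid,
			       uid_t fsuid)
{
	return S_ISLNK(link_mode) &&
	       (dir_mode & S_ISVTX) &&
	       (dir_mode & S_IWOTH) &&
	       dir_uid != link_uid &&
	       fsuid != link_uid;
}

int main(void)
{
	/* /tmp-style directory (mode 1777, owned by root), a symlink
	 * planted by uid 1001, followed by uid 1000. */
	bool denied = follow_link_denied(S_IFDIR | 01777, 0,
					 S_IFLNK | 0777, 1001, 1000);

	printf("follow denied: %s\n", denied ? "yes" : "no");
	return 0;
}
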
51565diff -urNp linux-2.6.32.42/grsecurity/grsec_log.c linux-2.6.32.42/grsecurity/grsec_log.c
51566--- linux-2.6.32.42/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51567+++ linux-2.6.32.42/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
51568@@ -0,0 +1,310 @@
51569+#include <linux/kernel.h>
51570+#include <linux/sched.h>
51571+#include <linux/file.h>
51572+#include <linux/tty.h>
51573+#include <linux/fs.h>
51574+#include <linux/grinternal.h>
51575+
51576+#ifdef CONFIG_TREE_PREEMPT_RCU
51577+#define DISABLE_PREEMPT() preempt_disable()
51578+#define ENABLE_PREEMPT() preempt_enable()
51579+#else
51580+#define DISABLE_PREEMPT()
51581+#define ENABLE_PREEMPT()
51582+#endif
51583+
51584+#define BEGIN_LOCKS(x) \
51585+ DISABLE_PREEMPT(); \
51586+ rcu_read_lock(); \
51587+ read_lock(&tasklist_lock); \
51588+ read_lock(&grsec_exec_file_lock); \
51589+ if (x != GR_DO_AUDIT) \
51590+ spin_lock(&grsec_alert_lock); \
51591+ else \
51592+ spin_lock(&grsec_audit_lock)
51593+
51594+#define END_LOCKS(x) \
51595+ if (x != GR_DO_AUDIT) \
51596+ spin_unlock(&grsec_alert_lock); \
51597+ else \
51598+ spin_unlock(&grsec_audit_lock); \
51599+ read_unlock(&grsec_exec_file_lock); \
51600+ read_unlock(&tasklist_lock); \
51601+ rcu_read_unlock(); \
51602+ ENABLE_PREEMPT(); \
51603+ if (x == GR_DONT_AUDIT) \
51604+ gr_handle_alertkill(current)
51605+
51606+enum {
51607+ FLOODING,
51608+ NO_FLOODING
51609+};
51610+
51611+extern char *gr_alert_log_fmt;
51612+extern char *gr_audit_log_fmt;
51613+extern char *gr_alert_log_buf;
51614+extern char *gr_audit_log_buf;
51615+
51616+static int gr_log_start(int audit)
51617+{
51618+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51619+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51620+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51621+
51622+ if (audit == GR_DO_AUDIT)
51623+ goto set_fmt;
51624+
51625+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51626+ grsec_alert_wtime = jiffies;
51627+ grsec_alert_fyet = 0;
51628+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51629+ grsec_alert_fyet++;
51630+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51631+ grsec_alert_wtime = jiffies;
51632+ grsec_alert_fyet++;
51633+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51634+ return FLOODING;
51635+ } else return FLOODING;
51636+
51637+set_fmt:
51638+ memset(buf, 0, PAGE_SIZE);
51639+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
51640+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51641+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51642+ } else if (current->signal->curr_ip) {
51643+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51644+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51645+ } else if (gr_acl_is_enabled()) {
51646+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51647+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51648+ } else {
51649+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
51650+ strcpy(buf, fmt);
51651+ }
51652+
51653+ return NO_FLOODING;
51654+}
51655+
51656+static void gr_log_middle(int audit, const char *msg, va_list ap)
51657+ __attribute__ ((format (printf, 2, 0)));
51658+
51659+static void gr_log_middle(int audit, const char *msg, va_list ap)
51660+{
51661+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51662+ unsigned int len = strlen(buf);
51663+
51664+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51665+
51666+ return;
51667+}
51668+
51669+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51670+ __attribute__ ((format (printf, 2, 3)));
51671+
51672+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51673+{
51674+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51675+ unsigned int len = strlen(buf);
51676+ va_list ap;
51677+
51678+ va_start(ap, msg);
51679+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51680+ va_end(ap);
51681+
51682+ return;
51683+}
51684+
51685+static void gr_log_end(int audit)
51686+{
51687+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51688+ unsigned int len = strlen(buf);
51689+
51690+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51691+ printk("%s\n", buf);
51692+
51693+ return;
51694+}
51695+
51696+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51697+{
51698+ int logtype;
51699+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51700+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51701+ void *voidptr = NULL;
51702+ int num1 = 0, num2 = 0;
51703+ unsigned long ulong1 = 0, ulong2 = 0;
51704+ struct dentry *dentry = NULL;
51705+ struct vfsmount *mnt = NULL;
51706+ struct file *file = NULL;
51707+ struct task_struct *task = NULL;
51708+ const struct cred *cred, *pcred;
51709+ va_list ap;
51710+
51711+ BEGIN_LOCKS(audit);
51712+ logtype = gr_log_start(audit);
51713+ if (logtype == FLOODING) {
51714+ END_LOCKS(audit);
51715+ return;
51716+ }
51717+ va_start(ap, argtypes);
51718+ switch (argtypes) {
51719+ case GR_TTYSNIFF:
51720+ task = va_arg(ap, struct task_struct *);
51721+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51722+ break;
51723+ case GR_SYSCTL_HIDDEN:
51724+ str1 = va_arg(ap, char *);
51725+ gr_log_middle_varargs(audit, msg, result, str1);
51726+ break;
51727+ case GR_RBAC:
51728+ dentry = va_arg(ap, struct dentry *);
51729+ mnt = va_arg(ap, struct vfsmount *);
51730+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51731+ break;
51732+ case GR_RBAC_STR:
51733+ dentry = va_arg(ap, struct dentry *);
51734+ mnt = va_arg(ap, struct vfsmount *);
51735+ str1 = va_arg(ap, char *);
51736+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51737+ break;
51738+ case GR_STR_RBAC:
51739+ str1 = va_arg(ap, char *);
51740+ dentry = va_arg(ap, struct dentry *);
51741+ mnt = va_arg(ap, struct vfsmount *);
51742+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51743+ break;
51744+ case GR_RBAC_MODE2:
51745+ dentry = va_arg(ap, struct dentry *);
51746+ mnt = va_arg(ap, struct vfsmount *);
51747+ str1 = va_arg(ap, char *);
51748+ str2 = va_arg(ap, char *);
51749+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51750+ break;
51751+ case GR_RBAC_MODE3:
51752+ dentry = va_arg(ap, struct dentry *);
51753+ mnt = va_arg(ap, struct vfsmount *);
51754+ str1 = va_arg(ap, char *);
51755+ str2 = va_arg(ap, char *);
51756+ str3 = va_arg(ap, char *);
51757+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51758+ break;
51759+ case GR_FILENAME:
51760+ dentry = va_arg(ap, struct dentry *);
51761+ mnt = va_arg(ap, struct vfsmount *);
51762+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51763+ break;
51764+ case GR_STR_FILENAME:
51765+ str1 = va_arg(ap, char *);
51766+ dentry = va_arg(ap, struct dentry *);
51767+ mnt = va_arg(ap, struct vfsmount *);
51768+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51769+ break;
51770+ case GR_FILENAME_STR:
51771+ dentry = va_arg(ap, struct dentry *);
51772+ mnt = va_arg(ap, struct vfsmount *);
51773+ str1 = va_arg(ap, char *);
51774+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51775+ break;
51776+ case GR_FILENAME_TWO_INT:
51777+ dentry = va_arg(ap, struct dentry *);
51778+ mnt = va_arg(ap, struct vfsmount *);
51779+ num1 = va_arg(ap, int);
51780+ num2 = va_arg(ap, int);
51781+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51782+ break;
51783+ case GR_FILENAME_TWO_INT_STR:
51784+ dentry = va_arg(ap, struct dentry *);
51785+ mnt = va_arg(ap, struct vfsmount *);
51786+ num1 = va_arg(ap, int);
51787+ num2 = va_arg(ap, int);
51788+ str1 = va_arg(ap, char *);
51789+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51790+ break;
51791+ case GR_TEXTREL:
51792+ file = va_arg(ap, struct file *);
51793+ ulong1 = va_arg(ap, unsigned long);
51794+ ulong2 = va_arg(ap, unsigned long);
51795+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51796+ break;
51797+ case GR_PTRACE:
51798+ task = va_arg(ap, struct task_struct *);
51799+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51800+ break;
51801+ case GR_RESOURCE:
51802+ task = va_arg(ap, struct task_struct *);
51803+ cred = __task_cred(task);
51804+ pcred = __task_cred(task->real_parent);
51805+ ulong1 = va_arg(ap, unsigned long);
51806+ str1 = va_arg(ap, char *);
51807+ ulong2 = va_arg(ap, unsigned long);
51808+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51809+ break;
51810+ case GR_CAP:
51811+ task = va_arg(ap, struct task_struct *);
51812+ cred = __task_cred(task);
51813+ pcred = __task_cred(task->real_parent);
51814+ str1 = va_arg(ap, char *);
51815+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51816+ break;
51817+ case GR_SIG:
51818+ str1 = va_arg(ap, char *);
51819+ voidptr = va_arg(ap, void *);
51820+ gr_log_middle_varargs(audit, msg, str1, voidptr);
51821+ break;
51822+ case GR_SIG2:
51823+ task = va_arg(ap, struct task_struct *);
51824+ cred = __task_cred(task);
51825+ pcred = __task_cred(task->real_parent);
51826+ num1 = va_arg(ap, int);
51827+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51828+ break;
51829+ case GR_CRASH1:
51830+ task = va_arg(ap, struct task_struct *);
51831+ cred = __task_cred(task);
51832+ pcred = __task_cred(task->real_parent);
51833+ ulong1 = va_arg(ap, unsigned long);
51834+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51835+ break;
51836+ case GR_CRASH2:
51837+ task = va_arg(ap, struct task_struct *);
51838+ cred = __task_cred(task);
51839+ pcred = __task_cred(task->real_parent);
51840+ ulong1 = va_arg(ap, unsigned long);
51841+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51842+ break;
51843+ case GR_RWXMAP:
51844+ file = va_arg(ap, struct file *);
51845+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51846+ break;
51847+ case GR_PSACCT:
51848+ {
51849+ unsigned int wday, cday;
51850+ __u8 whr, chr;
51851+ __u8 wmin, cmin;
51852+ __u8 wsec, csec;
51853+ char cur_tty[64] = { 0 };
51854+ char parent_tty[64] = { 0 };
51855+
51856+ task = va_arg(ap, struct task_struct *);
51857+ wday = va_arg(ap, unsigned int);
51858+ cday = va_arg(ap, unsigned int);
51859+ whr = va_arg(ap, int);
51860+ chr = va_arg(ap, int);
51861+ wmin = va_arg(ap, int);
51862+ cmin = va_arg(ap, int);
51863+ wsec = va_arg(ap, int);
51864+ csec = va_arg(ap, int);
51865+ ulong1 = va_arg(ap, unsigned long);
51866+ cred = __task_cred(task);
51867+ pcred = __task_cred(task->real_parent);
51868+
51869+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51870+ }
51871+ break;
51872+ default:
51873+ gr_log_middle(audit, msg, ap);
51874+ }
51875+ va_end(ap);
51876+ gr_log_end(audit);
51877+ END_LOCKS(audit);
51878+}
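
gr_log_start() above rate-limits alert (non-audit) messages: a fresh window starts whenever more than CONFIG_GRKERNSEC_FLOODTIME seconds have passed since the window began, up to CONFIG_GRKERNSEC_FLOODBURST further alerts are allowed inside a window, and once that burst is exhausted a single "logging disabled" notice is printed and everything else is dropped until a full window has elapsed. A standalone sketch of that limiter, with placeholder values standing in for the two Kconfig options and plain seconds standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for CONFIG_GRKERNSEC_FLOODTIME (seconds) and
 * CONFIG_GRKERNSEC_FLOODBURST; the real values are Kconfig options. */
#define FLOODTIME  10
#define FLOODBURST 4

static unsigned long alert_wtime;	/* start of the current window */
static unsigned long alert_fyet;	/* alerts seen in the window   */

/* Mirror of the non-audit branch of gr_log_start(): returns true when
 * the message may be emitted, false when it is suppressed. */
static bool alert_allowed(unsigned long now)
{
	if (!alert_wtime || now - alert_wtime > FLOODTIME) {
		alert_wtime = now;	/* new window */
		alert_fyet = 0;
		return true;
	}
	if (alert_fyet < FLOODBURST) {
		alert_fyet++;
		return true;
	}
	if (alert_fyet == FLOODBURST) {
		alert_wtime = now;
		alert_fyet++;
		printf("logging disabled for %d seconds\n", FLOODTIME);
	}
	return false;
}

int main(void)
{
	/* the first five alerts pass, the rest of the window is dropped */
	for (unsigned long t = 1; t <= 8; t++)
		printf("t=%lu -> %s\n", t, alert_allowed(t) ? "log" : "drop");
	return 0;
}
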
51879diff -urNp linux-2.6.32.42/grsecurity/grsec_mem.c linux-2.6.32.42/grsecurity/grsec_mem.c
51880--- linux-2.6.32.42/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51881+++ linux-2.6.32.42/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51882@@ -0,0 +1,33 @@
51883+#include <linux/kernel.h>
51884+#include <linux/sched.h>
51885+#include <linux/mm.h>
51886+#include <linux/mman.h>
51887+#include <linux/grinternal.h>
51888+
51889+void
51890+gr_handle_ioperm(void)
51891+{
51892+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51893+ return;
51894+}
51895+
51896+void
51897+gr_handle_iopl(void)
51898+{
51899+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51900+ return;
51901+}
51902+
51903+void
51904+gr_handle_mem_readwrite(u64 from, u64 to)
51905+{
51906+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51907+ return;
51908+}
51909+
51910+void
51911+gr_handle_vm86(void)
51912+{
51913+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51914+ return;
51915+}
51916diff -urNp linux-2.6.32.42/grsecurity/grsec_mount.c linux-2.6.32.42/grsecurity/grsec_mount.c
51917--- linux-2.6.32.42/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51918+++ linux-2.6.32.42/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51919@@ -0,0 +1,62 @@
51920+#include <linux/kernel.h>
51921+#include <linux/sched.h>
51922+#include <linux/mount.h>
51923+#include <linux/grsecurity.h>
51924+#include <linux/grinternal.h>
51925+
51926+void
51927+gr_log_remount(const char *devname, const int retval)
51928+{
51929+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51930+ if (grsec_enable_mount && (retval >= 0))
51931+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51932+#endif
51933+ return;
51934+}
51935+
51936+void
51937+gr_log_unmount(const char *devname, const int retval)
51938+{
51939+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51940+ if (grsec_enable_mount && (retval >= 0))
51941+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51942+#endif
51943+ return;
51944+}
51945+
51946+void
51947+gr_log_mount(const char *from, const char *to, const int retval)
51948+{
51949+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51950+ if (grsec_enable_mount && (retval >= 0))
51951+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51952+#endif
51953+ return;
51954+}
51955+
51956+int
51957+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51958+{
51959+#ifdef CONFIG_GRKERNSEC_ROFS
51960+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51961+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51962+ return -EPERM;
51963+ } else
51964+ return 0;
51965+#endif
51966+ return 0;
51967+}
51968+
51969+int
51970+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51971+{
51972+#ifdef CONFIG_GRKERNSEC_ROFS
51973+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51974+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51975+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51976+ return -EPERM;
51977+ } else
51978+ return 0;
51979+#endif
51980+ return 0;
51981+}
51982diff -urNp linux-2.6.32.42/grsecurity/grsec_pax.c linux-2.6.32.42/grsecurity/grsec_pax.c
51983--- linux-2.6.32.42/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51984+++ linux-2.6.32.42/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51985@@ -0,0 +1,36 @@
51986+#include <linux/kernel.h>
51987+#include <linux/sched.h>
51988+#include <linux/mm.h>
51989+#include <linux/file.h>
51990+#include <linux/grinternal.h>
51991+#include <linux/grsecurity.h>
51992+
51993+void
51994+gr_log_textrel(struct vm_area_struct * vma)
51995+{
51996+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51997+ if (grsec_enable_audit_textrel)
51998+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51999+#endif
52000+ return;
52001+}
52002+
52003+void
52004+gr_log_rwxmmap(struct file *file)
52005+{
52006+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52007+ if (grsec_enable_log_rwxmaps)
52008+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52009+#endif
52010+ return;
52011+}
52012+
52013+void
52014+gr_log_rwxmprotect(struct file *file)
52015+{
52016+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52017+ if (grsec_enable_log_rwxmaps)
52018+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52019+#endif
52020+ return;
52021+}
52022diff -urNp linux-2.6.32.42/grsecurity/grsec_ptrace.c linux-2.6.32.42/grsecurity/grsec_ptrace.c
52023--- linux-2.6.32.42/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52024+++ linux-2.6.32.42/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
52025@@ -0,0 +1,14 @@
52026+#include <linux/kernel.h>
52027+#include <linux/sched.h>
52028+#include <linux/grinternal.h>
52029+#include <linux/grsecurity.h>
52030+
52031+void
52032+gr_audit_ptrace(struct task_struct *task)
52033+{
52034+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52035+ if (grsec_enable_audit_ptrace)
52036+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52037+#endif
52038+ return;
52039+}
52040diff -urNp linux-2.6.32.42/grsecurity/grsec_sig.c linux-2.6.32.42/grsecurity/grsec_sig.c
52041--- linux-2.6.32.42/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52042+++ linux-2.6.32.42/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
52043@@ -0,0 +1,205 @@
52044+#include <linux/kernel.h>
52045+#include <linux/sched.h>
52046+#include <linux/delay.h>
52047+#include <linux/grsecurity.h>
52048+#include <linux/grinternal.h>
52049+#include <linux/hardirq.h>
52050+
52051+char *signames[] = {
52052+ [SIGSEGV] = "Segmentation fault",
52053+ [SIGILL] = "Illegal instruction",
52054+ [SIGABRT] = "Abort",
52055+ [SIGBUS] = "Invalid alignment/Bus error"
52056+};
52057+
52058+void
52059+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52060+{
52061+#ifdef CONFIG_GRKERNSEC_SIGNAL
52062+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52063+ (sig == SIGABRT) || (sig == SIGBUS))) {
52064+ if (t->pid == current->pid) {
52065+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52066+ } else {
52067+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52068+ }
52069+ }
52070+#endif
52071+ return;
52072+}
52073+
52074+int
52075+gr_handle_signal(const struct task_struct *p, const int sig)
52076+{
52077+#ifdef CONFIG_GRKERNSEC
52078+ if (current->pid > 1 && gr_check_protected_task(p)) {
52079+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52080+ return -EPERM;
52081+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52082+ return -EPERM;
52083+ }
52084+#endif
52085+ return 0;
52086+}
52087+
52088+#ifdef CONFIG_GRKERNSEC
52089+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52090+
52091+int gr_fake_force_sig(int sig, struct task_struct *t)
52092+{
52093+ unsigned long int flags;
52094+ int ret, blocked, ignored;
52095+ struct k_sigaction *action;
52096+
52097+ spin_lock_irqsave(&t->sighand->siglock, flags);
52098+ action = &t->sighand->action[sig-1];
52099+ ignored = action->sa.sa_handler == SIG_IGN;
52100+ blocked = sigismember(&t->blocked, sig);
52101+ if (blocked || ignored) {
52102+ action->sa.sa_handler = SIG_DFL;
52103+ if (blocked) {
52104+ sigdelset(&t->blocked, sig);
52105+ recalc_sigpending_and_wake(t);
52106+ }
52107+ }
52108+ if (action->sa.sa_handler == SIG_DFL)
52109+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
52110+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52111+
52112+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
52113+
52114+ return ret;
52115+}
52116+#endif
52117+
52118+#ifdef CONFIG_GRKERNSEC_BRUTE
52119+#define GR_USER_BAN_TIME (15 * 60)
52120+
52121+static int __get_dumpable(unsigned long mm_flags)
52122+{
52123+ int ret;
52124+
52125+ ret = mm_flags & MMF_DUMPABLE_MASK;
52126+ return (ret >= 2) ? 2 : ret;
52127+}
52128+#endif
52129+
52130+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52131+{
52132+#ifdef CONFIG_GRKERNSEC_BRUTE
52133+ uid_t uid = 0;
52134+
52135+ if (!grsec_enable_brute)
52136+ return;
52137+
52138+ rcu_read_lock();
52139+ read_lock(&tasklist_lock);
52140+ read_lock(&grsec_exec_file_lock);
52141+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52142+ p->real_parent->brute = 1;
52143+ else {
52144+ const struct cred *cred = __task_cred(p), *cred2;
52145+ struct task_struct *tsk, *tsk2;
52146+
52147+ if (!__get_dumpable(mm_flags) && cred->uid) {
52148+ struct user_struct *user;
52149+
52150+ uid = cred->uid;
52151+
52152+			/* this reference is put (freed) at the next exec once the ban has expired */
52153+ user = find_user(uid);
52154+ if (user == NULL)
52155+ goto unlock;
52156+ user->banned = 1;
52157+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
52158+ if (user->ban_expires == ~0UL)
52159+ user->ban_expires--;
52160+
52161+ do_each_thread(tsk2, tsk) {
52162+ cred2 = __task_cred(tsk);
52163+ if (tsk != p && cred2->uid == uid)
52164+ gr_fake_force_sig(SIGKILL, tsk);
52165+ } while_each_thread(tsk2, tsk);
52166+ }
52167+ }
52168+unlock:
52169+ read_unlock(&grsec_exec_file_lock);
52170+ read_unlock(&tasklist_lock);
52171+ rcu_read_unlock();
52172+
52173+ if (uid)
52174+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52175+#endif
52176+ return;
52177+}
52178+
52179+void gr_handle_brute_check(void)
52180+{
52181+#ifdef CONFIG_GRKERNSEC_BRUTE
52182+ if (current->brute)
52183+ msleep(30 * 1000);
52184+#endif
52185+ return;
52186+}
52187+
52188+void gr_handle_kernel_exploit(void)
52189+{
52190+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52191+ const struct cred *cred;
52192+ struct task_struct *tsk, *tsk2;
52193+ struct user_struct *user;
52194+ uid_t uid;
52195+
52196+ if (in_irq() || in_serving_softirq() || in_nmi())
52197+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52198+
52199+ uid = current_uid();
52200+
52201+ if (uid == 0)
52202+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
52203+ else {
52204+ /* kill all the processes of this user, hold a reference
52205+ to their creds struct, and prevent them from creating
52206+ another process until system reset
52207+ */
52208+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52209+ /* we intentionally leak this ref */
52210+ user = get_uid(current->cred->user);
52211+ if (user) {
52212+ user->banned = 1;
52213+ user->ban_expires = ~0UL;
52214+ }
52215+
52216+ read_lock(&tasklist_lock);
52217+ do_each_thread(tsk2, tsk) {
52218+ cred = __task_cred(tsk);
52219+ if (cred->uid == uid)
52220+ gr_fake_force_sig(SIGKILL, tsk);
52221+ } while_each_thread(tsk2, tsk);
52222+ read_unlock(&tasklist_lock);
52223+ }
52224+#endif
52225+}
52226+
52227+int __gr_process_user_ban(struct user_struct *user)
52228+{
52229+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52230+ if (unlikely(user->banned)) {
52231+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52232+ user->banned = 0;
52233+ user->ban_expires = 0;
52234+ free_uid(user);
52235+ } else
52236+ return -EPERM;
52237+ }
52238+#endif
52239+ return 0;
52240+}
52241+
52242+int gr_process_user_ban(void)
52243+{
52244+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52245+ return __gr_process_user_ban(current->cred->user);
52246+#endif
52247+ return 0;
52248+}
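
The brute-force handling above bans a user for 15 minutes by setting banned/ban_expires on the user_struct and killing the user's other tasks; __gr_process_user_ban() later clears the ban (and drops the held reference) once the expiry time has passed. A standalone sketch of just that bookkeeping, with a hypothetical struct user_ban standing in for the fields added to struct user_struct:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical per-user ban record mirroring the banned/ban_expires
 * fields used above. */
struct user_ban {
	bool banned;
	unsigned long ban_expires;	/* 0 = unused, ~0UL = permanent */
};

#define GR_USER_BAN_TIME (15 * 60)	/* same 15-minute window as above */

/* Mirror of gr_handle_brute_attach()'s bookkeeping: ban for 15 minutes,
 * avoiding the ~0UL value that is reserved for permanent bans. */
static void ban_user(struct user_ban *u, unsigned long now)
{
	u->banned = true;
	u->ban_expires = now + GR_USER_BAN_TIME;
	if (u->ban_expires == ~0UL)
		u->ban_expires--;
}

/* Mirror of __gr_process_user_ban(): returns -1 (-EPERM in the kernel)
 * while the ban is active, clears it once it has expired. */
static int process_user_ban(struct user_ban *u, unsigned long now)
{
	if (u->banned) {
		if (u->ban_expires != ~0UL && now >= u->ban_expires) {
			u->banned = false;
			u->ban_expires = 0;
		} else {
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct user_ban u = { 0 };
	unsigned long now = (unsigned long)time(NULL);

	ban_user(&u, now);
	printf("immediately after ban: %d\n", process_user_ban(&u, now));
	printf("16 minutes later:      %d\n",
	       process_user_ban(&u, now + 16 * 60));
	return 0;
}
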
52249diff -urNp linux-2.6.32.42/grsecurity/grsec_sock.c linux-2.6.32.42/grsecurity/grsec_sock.c
52250--- linux-2.6.32.42/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52251+++ linux-2.6.32.42/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
52252@@ -0,0 +1,275 @@
52253+#include <linux/kernel.h>
52254+#include <linux/module.h>
52255+#include <linux/sched.h>
52256+#include <linux/file.h>
52257+#include <linux/net.h>
52258+#include <linux/in.h>
52259+#include <linux/ip.h>
52260+#include <net/sock.h>
52261+#include <net/inet_sock.h>
52262+#include <linux/grsecurity.h>
52263+#include <linux/grinternal.h>
52264+#include <linux/gracl.h>
52265+
52266+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
52267+EXPORT_SYMBOL(gr_cap_rtnetlink);
52268+
52269+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52270+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52271+
52272+EXPORT_SYMBOL(gr_search_udp_recvmsg);
52273+EXPORT_SYMBOL(gr_search_udp_sendmsg);
52274+
52275+#ifdef CONFIG_UNIX_MODULE
52276+EXPORT_SYMBOL(gr_acl_handle_unix);
52277+EXPORT_SYMBOL(gr_acl_handle_mknod);
52278+EXPORT_SYMBOL(gr_handle_chroot_unix);
52279+EXPORT_SYMBOL(gr_handle_create);
52280+#endif
52281+
52282+#ifdef CONFIG_GRKERNSEC
52283+#define gr_conn_table_size 32749
52284+struct conn_table_entry {
52285+ struct conn_table_entry *next;
52286+ struct signal_struct *sig;
52287+};
52288+
52289+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52290+DEFINE_SPINLOCK(gr_conn_table_lock);
52291+
52292+extern const char * gr_socktype_to_name(unsigned char type);
52293+extern const char * gr_proto_to_name(unsigned char proto);
52294+extern const char * gr_sockfamily_to_name(unsigned char family);
52295+
52296+static __inline__ int
52297+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52298+{
52299+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52300+}
52301+
52302+static __inline__ int
52303+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52304+ __u16 sport, __u16 dport)
52305+{
52306+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52307+ sig->gr_sport == sport && sig->gr_dport == dport))
52308+ return 1;
52309+ else
52310+ return 0;
52311+}
52312+
52313+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52314+{
52315+ struct conn_table_entry **match;
52316+ unsigned int index;
52317+
52318+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52319+ sig->gr_sport, sig->gr_dport,
52320+ gr_conn_table_size);
52321+
52322+ newent->sig = sig;
52323+
52324+ match = &gr_conn_table[index];
52325+ newent->next = *match;
52326+ *match = newent;
52327+
52328+ return;
52329+}
52330+
52331+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52332+{
52333+ struct conn_table_entry *match, *last = NULL;
52334+ unsigned int index;
52335+
52336+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52337+ sig->gr_sport, sig->gr_dport,
52338+ gr_conn_table_size);
52339+
52340+ match = gr_conn_table[index];
52341+ while (match && !conn_match(match->sig,
52342+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52343+ sig->gr_dport)) {
52344+ last = match;
52345+ match = match->next;
52346+ }
52347+
52348+ if (match) {
52349+ if (last)
52350+ last->next = match->next;
52351+ else
52352+ gr_conn_table[index] = NULL;
52353+ kfree(match);
52354+ }
52355+
52356+ return;
52357+}
52358+
52359+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52360+ __u16 sport, __u16 dport)
52361+{
52362+ struct conn_table_entry *match;
52363+ unsigned int index;
52364+
52365+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52366+
52367+ match = gr_conn_table[index];
52368+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52369+ match = match->next;
52370+
52371+ if (match)
52372+ return match->sig;
52373+ else
52374+ return NULL;
52375+}
52376+
52377+#endif
52378+
52379+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52380+{
52381+#ifdef CONFIG_GRKERNSEC
52382+ struct signal_struct *sig = task->signal;
52383+ struct conn_table_entry *newent;
52384+
52385+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52386+ if (newent == NULL)
52387+ return;
52388+ /* no bh lock needed since we are called with bh disabled */
52389+ spin_lock(&gr_conn_table_lock);
52390+ gr_del_task_from_ip_table_nolock(sig);
52391+ sig->gr_saddr = inet->rcv_saddr;
52392+ sig->gr_daddr = inet->daddr;
52393+ sig->gr_sport = inet->sport;
52394+ sig->gr_dport = inet->dport;
52395+ gr_add_to_task_ip_table_nolock(sig, newent);
52396+ spin_unlock(&gr_conn_table_lock);
52397+#endif
52398+ return;
52399+}
52400+
52401+void gr_del_task_from_ip_table(struct task_struct *task)
52402+{
52403+#ifdef CONFIG_GRKERNSEC
52404+ spin_lock_bh(&gr_conn_table_lock);
52405+ gr_del_task_from_ip_table_nolock(task->signal);
52406+ spin_unlock_bh(&gr_conn_table_lock);
52407+#endif
52408+ return;
52409+}
52410+
52411+void
52412+gr_attach_curr_ip(const struct sock *sk)
52413+{
52414+#ifdef CONFIG_GRKERNSEC
52415+ struct signal_struct *p, *set;
52416+ const struct inet_sock *inet = inet_sk(sk);
52417+
52418+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52419+ return;
52420+
52421+ set = current->signal;
52422+
52423+ spin_lock_bh(&gr_conn_table_lock);
52424+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
52425+ inet->dport, inet->sport);
52426+ if (unlikely(p != NULL)) {
52427+ set->curr_ip = p->curr_ip;
52428+ set->used_accept = 1;
52429+ gr_del_task_from_ip_table_nolock(p);
52430+ spin_unlock_bh(&gr_conn_table_lock);
52431+ return;
52432+ }
52433+ spin_unlock_bh(&gr_conn_table_lock);
52434+
52435+ set->curr_ip = inet->daddr;
52436+ set->used_accept = 1;
52437+#endif
52438+ return;
52439+}
52440+
52441+int
52442+gr_handle_sock_all(const int family, const int type, const int protocol)
52443+{
52444+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52445+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52446+ (family != AF_UNIX)) {
52447+ if (family == AF_INET)
52448+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52449+ else
52450+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52451+ return -EACCES;
52452+ }
52453+#endif
52454+ return 0;
52455+}
52456+
52457+int
52458+gr_handle_sock_server(const struct sockaddr *sck)
52459+{
52460+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52461+ if (grsec_enable_socket_server &&
52462+ in_group_p(grsec_socket_server_gid) &&
52463+ sck && (sck->sa_family != AF_UNIX) &&
52464+ (sck->sa_family != AF_LOCAL)) {
52465+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52466+ return -EACCES;
52467+ }
52468+#endif
52469+ return 0;
52470+}
52471+
52472+int
52473+gr_handle_sock_server_other(const struct sock *sck)
52474+{
52475+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52476+ if (grsec_enable_socket_server &&
52477+ in_group_p(grsec_socket_server_gid) &&
52478+ sck && (sck->sk_family != AF_UNIX) &&
52479+ (sck->sk_family != AF_LOCAL)) {
52480+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52481+ return -EACCES;
52482+ }
52483+#endif
52484+ return 0;
52485+}
52486+
52487+int
52488+gr_handle_sock_client(const struct sockaddr *sck)
52489+{
52490+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52491+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52492+ sck && (sck->sa_family != AF_UNIX) &&
52493+ (sck->sa_family != AF_LOCAL)) {
52494+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52495+ return -EACCES;
52496+ }
52497+#endif
52498+ return 0;
52499+}
52500+
52501+kernel_cap_t
52502+gr_cap_rtnetlink(struct sock *sock)
52503+{
52504+#ifdef CONFIG_GRKERNSEC
52505+ if (!gr_acl_is_enabled())
52506+ return current_cap();
52507+ else if (sock->sk_protocol == NETLINK_ISCSI &&
52508+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
52509+ gr_is_capable(CAP_SYS_ADMIN))
52510+ return current_cap();
52511+ else if (sock->sk_protocol == NETLINK_AUDIT &&
52512+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
52513+ gr_is_capable(CAP_AUDIT_WRITE) &&
52514+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
52515+ gr_is_capable(CAP_AUDIT_CONTROL))
52516+ return current_cap();
52517+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
52518+ ((sock->sk_protocol == NETLINK_ROUTE) ?
52519+ gr_is_capable_nolog(CAP_NET_ADMIN) :
52520+ gr_is_capable(CAP_NET_ADMIN)))
52521+ return current_cap();
52522+ else
52523+ return __cap_empty_set;
52524+#else
52525+ return current_cap();
52526+#endif
52527+}
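
The connection table above lets gr_attach_curr_ip() recover, at accept() time, the signal_struct of the task that originated a TCP connection: entries are chained into 32749 buckets selected by a simple additive hash over the 4-tuple. A standalone sketch of that hash (same formula; host byte order is used in the example for brevity, whereas the kernel hashes the raw network-byte-order values):

#include <stdint.h>
#include <stdio.h>

/* Same prime bucket count as gr_conn_table_size above. */
#define CONN_TABLE_SIZE 32749

/* Mirror of conn_hash(): an additive hash over saddr/daddr/sport/dport,
 * reduced modulo the (prime) table size. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport,
			      unsigned int size)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % size;
}

int main(void)
{
	/* 192.0.2.1:40000 -> 198.51.100.7:80 */
	uint32_t saddr = 0xc0000201, daddr = 0xc6336407;
	uint16_t sport = 40000, dport = 80;

	printf("bucket = %u of %d\n",
	       conn_hash(saddr, daddr, sport, dport, CONN_TABLE_SIZE),
	       CONN_TABLE_SIZE);
	return 0;
}
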
52528diff -urNp linux-2.6.32.42/grsecurity/grsec_sysctl.c linux-2.6.32.42/grsecurity/grsec_sysctl.c
52529--- linux-2.6.32.42/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52530+++ linux-2.6.32.42/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
52531@@ -0,0 +1,489 @@
52532+#include <linux/kernel.h>
52533+#include <linux/sched.h>
52534+#include <linux/sysctl.h>
52535+#include <linux/grsecurity.h>
52536+#include <linux/grinternal.h>
52537+
52538+int
52539+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52540+{
52541+#ifdef CONFIG_GRKERNSEC_SYSCTL
52542+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52543+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52544+ return -EACCES;
52545+ }
52546+#endif
52547+ return 0;
52548+}
52549+
52550+#ifdef CONFIG_GRKERNSEC_ROFS
52551+static int __maybe_unused one = 1;
52552+#endif
52553+
52554+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52555+ctl_table grsecurity_table[] = {
52556+#ifdef CONFIG_GRKERNSEC_SYSCTL
52557+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52558+#ifdef CONFIG_GRKERNSEC_IO
52559+ {
52560+ .ctl_name = CTL_UNNUMBERED,
52561+ .procname = "disable_priv_io",
52562+ .data = &grsec_disable_privio,
52563+ .maxlen = sizeof(int),
52564+ .mode = 0600,
52565+ .proc_handler = &proc_dointvec,
52566+ },
52567+#endif
52568+#endif
52569+#ifdef CONFIG_GRKERNSEC_LINK
52570+ {
52571+ .ctl_name = CTL_UNNUMBERED,
52572+ .procname = "linking_restrictions",
52573+ .data = &grsec_enable_link,
52574+ .maxlen = sizeof(int),
52575+ .mode = 0600,
52576+ .proc_handler = &proc_dointvec,
52577+ },
52578+#endif
52579+#ifdef CONFIG_GRKERNSEC_BRUTE
52580+ {
52581+ .ctl_name = CTL_UNNUMBERED,
52582+ .procname = "deter_bruteforce",
52583+ .data = &grsec_enable_brute,
52584+ .maxlen = sizeof(int),
52585+ .mode = 0600,
52586+ .proc_handler = &proc_dointvec,
52587+ },
52588+#endif
52589+#ifdef CONFIG_GRKERNSEC_FIFO
52590+ {
52591+ .ctl_name = CTL_UNNUMBERED,
52592+ .procname = "fifo_restrictions",
52593+ .data = &grsec_enable_fifo,
52594+ .maxlen = sizeof(int),
52595+ .mode = 0600,
52596+ .proc_handler = &proc_dointvec,
52597+ },
52598+#endif
52599+#ifdef CONFIG_GRKERNSEC_EXECVE
52600+ {
52601+ .ctl_name = CTL_UNNUMBERED,
52602+ .procname = "execve_limiting",
52603+ .data = &grsec_enable_execve,
52604+ .maxlen = sizeof(int),
52605+ .mode = 0600,
52606+ .proc_handler = &proc_dointvec,
52607+ },
52608+#endif
52609+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52610+ {
52611+ .ctl_name = CTL_UNNUMBERED,
52612+ .procname = "ip_blackhole",
52613+ .data = &grsec_enable_blackhole,
52614+ .maxlen = sizeof(int),
52615+ .mode = 0600,
52616+ .proc_handler = &proc_dointvec,
52617+ },
52618+ {
52619+ .ctl_name = CTL_UNNUMBERED,
52620+ .procname = "lastack_retries",
52621+ .data = &grsec_lastack_retries,
52622+ .maxlen = sizeof(int),
52623+ .mode = 0600,
52624+ .proc_handler = &proc_dointvec,
52625+ },
52626+#endif
52627+#ifdef CONFIG_GRKERNSEC_EXECLOG
52628+ {
52629+ .ctl_name = CTL_UNNUMBERED,
52630+ .procname = "exec_logging",
52631+ .data = &grsec_enable_execlog,
52632+ .maxlen = sizeof(int),
52633+ .mode = 0600,
52634+ .proc_handler = &proc_dointvec,
52635+ },
52636+#endif
52637+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52638+ {
52639+ .ctl_name = CTL_UNNUMBERED,
52640+ .procname = "rwxmap_logging",
52641+ .data = &grsec_enable_log_rwxmaps,
52642+ .maxlen = sizeof(int),
52643+ .mode = 0600,
52644+ .proc_handler = &proc_dointvec,
52645+ },
52646+#endif
52647+#ifdef CONFIG_GRKERNSEC_SIGNAL
52648+ {
52649+ .ctl_name = CTL_UNNUMBERED,
52650+ .procname = "signal_logging",
52651+ .data = &grsec_enable_signal,
52652+ .maxlen = sizeof(int),
52653+ .mode = 0600,
52654+ .proc_handler = &proc_dointvec,
52655+ },
52656+#endif
52657+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52658+ {
52659+ .ctl_name = CTL_UNNUMBERED,
52660+ .procname = "forkfail_logging",
52661+ .data = &grsec_enable_forkfail,
52662+ .maxlen = sizeof(int),
52663+ .mode = 0600,
52664+ .proc_handler = &proc_dointvec,
52665+ },
52666+#endif
52667+#ifdef CONFIG_GRKERNSEC_TIME
52668+ {
52669+ .ctl_name = CTL_UNNUMBERED,
52670+ .procname = "timechange_logging",
52671+ .data = &grsec_enable_time,
52672+ .maxlen = sizeof(int),
52673+ .mode = 0600,
52674+ .proc_handler = &proc_dointvec,
52675+ },
52676+#endif
52677+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52678+ {
52679+ .ctl_name = CTL_UNNUMBERED,
52680+ .procname = "chroot_deny_shmat",
52681+ .data = &grsec_enable_chroot_shmat,
52682+ .maxlen = sizeof(int),
52683+ .mode = 0600,
52684+ .proc_handler = &proc_dointvec,
52685+ },
52686+#endif
52687+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52688+ {
52689+ .ctl_name = CTL_UNNUMBERED,
52690+ .procname = "chroot_deny_unix",
52691+ .data = &grsec_enable_chroot_unix,
52692+ .maxlen = sizeof(int),
52693+ .mode = 0600,
52694+ .proc_handler = &proc_dointvec,
52695+ },
52696+#endif
52697+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52698+ {
52699+ .ctl_name = CTL_UNNUMBERED,
52700+ .procname = "chroot_deny_mount",
52701+ .data = &grsec_enable_chroot_mount,
52702+ .maxlen = sizeof(int),
52703+ .mode = 0600,
52704+ .proc_handler = &proc_dointvec,
52705+ },
52706+#endif
52707+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52708+ {
52709+ .ctl_name = CTL_UNNUMBERED,
52710+ .procname = "chroot_deny_fchdir",
52711+ .data = &grsec_enable_chroot_fchdir,
52712+ .maxlen = sizeof(int),
52713+ .mode = 0600,
52714+ .proc_handler = &proc_dointvec,
52715+ },
52716+#endif
52717+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52718+ {
52719+ .ctl_name = CTL_UNNUMBERED,
52720+ .procname = "chroot_deny_chroot",
52721+ .data = &grsec_enable_chroot_double,
52722+ .maxlen = sizeof(int),
52723+ .mode = 0600,
52724+ .proc_handler = &proc_dointvec,
52725+ },
52726+#endif
52727+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52728+ {
52729+ .ctl_name = CTL_UNNUMBERED,
52730+ .procname = "chroot_deny_pivot",
52731+ .data = &grsec_enable_chroot_pivot,
52732+ .maxlen = sizeof(int),
52733+ .mode = 0600,
52734+ .proc_handler = &proc_dointvec,
52735+ },
52736+#endif
52737+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52738+ {
52739+ .ctl_name = CTL_UNNUMBERED,
52740+ .procname = "chroot_enforce_chdir",
52741+ .data = &grsec_enable_chroot_chdir,
52742+ .maxlen = sizeof(int),
52743+ .mode = 0600,
52744+ .proc_handler = &proc_dointvec,
52745+ },
52746+#endif
52747+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52748+ {
52749+ .ctl_name = CTL_UNNUMBERED,
52750+ .procname = "chroot_deny_chmod",
52751+ .data = &grsec_enable_chroot_chmod,
52752+ .maxlen = sizeof(int),
52753+ .mode = 0600,
52754+ .proc_handler = &proc_dointvec,
52755+ },
52756+#endif
52757+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52758+ {
52759+ .ctl_name = CTL_UNNUMBERED,
52760+ .procname = "chroot_deny_mknod",
52761+ .data = &grsec_enable_chroot_mknod,
52762+ .maxlen = sizeof(int),
52763+ .mode = 0600,
52764+ .proc_handler = &proc_dointvec,
52765+ },
52766+#endif
52767+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52768+ {
52769+ .ctl_name = CTL_UNNUMBERED,
52770+ .procname = "chroot_restrict_nice",
52771+ .data = &grsec_enable_chroot_nice,
52772+ .maxlen = sizeof(int),
52773+ .mode = 0600,
52774+ .proc_handler = &proc_dointvec,
52775+ },
52776+#endif
52777+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52778+ {
52779+ .ctl_name = CTL_UNNUMBERED,
52780+ .procname = "chroot_execlog",
52781+ .data = &grsec_enable_chroot_execlog,
52782+ .maxlen = sizeof(int),
52783+ .mode = 0600,
52784+ .proc_handler = &proc_dointvec,
52785+ },
52786+#endif
52787+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52788+ {
52789+ .ctl_name = CTL_UNNUMBERED,
52790+ .procname = "chroot_caps",
52791+ .data = &grsec_enable_chroot_caps,
52792+ .maxlen = sizeof(int),
52793+ .mode = 0600,
52794+ .proc_handler = &proc_dointvec,
52795+ },
52796+#endif
52797+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52798+ {
52799+ .ctl_name = CTL_UNNUMBERED,
52800+ .procname = "chroot_deny_sysctl",
52801+ .data = &grsec_enable_chroot_sysctl,
52802+ .maxlen = sizeof(int),
52803+ .mode = 0600,
52804+ .proc_handler = &proc_dointvec,
52805+ },
52806+#endif
52807+#ifdef CONFIG_GRKERNSEC_TPE
52808+ {
52809+ .ctl_name = CTL_UNNUMBERED,
52810+ .procname = "tpe",
52811+ .data = &grsec_enable_tpe,
52812+ .maxlen = sizeof(int),
52813+ .mode = 0600,
52814+ .proc_handler = &proc_dointvec,
52815+ },
52816+ {
52817+ .ctl_name = CTL_UNNUMBERED,
52818+ .procname = "tpe_gid",
52819+ .data = &grsec_tpe_gid,
52820+ .maxlen = sizeof(int),
52821+ .mode = 0600,
52822+ .proc_handler = &proc_dointvec,
52823+ },
52824+#endif
52825+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52826+ {
52827+ .ctl_name = CTL_UNNUMBERED,
52828+ .procname = "tpe_invert",
52829+ .data = &grsec_enable_tpe_invert,
52830+ .maxlen = sizeof(int),
52831+ .mode = 0600,
52832+ .proc_handler = &proc_dointvec,
52833+ },
52834+#endif
52835+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52836+ {
52837+ .ctl_name = CTL_UNNUMBERED,
52838+ .procname = "tpe_restrict_all",
52839+ .data = &grsec_enable_tpe_all,
52840+ .maxlen = sizeof(int),
52841+ .mode = 0600,
52842+ .proc_handler = &proc_dointvec,
52843+ },
52844+#endif
52845+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52846+ {
52847+ .ctl_name = CTL_UNNUMBERED,
52848+ .procname = "socket_all",
52849+ .data = &grsec_enable_socket_all,
52850+ .maxlen = sizeof(int),
52851+ .mode = 0600,
52852+ .proc_handler = &proc_dointvec,
52853+ },
52854+ {
52855+ .ctl_name = CTL_UNNUMBERED,
52856+ .procname = "socket_all_gid",
52857+ .data = &grsec_socket_all_gid,
52858+ .maxlen = sizeof(int),
52859+ .mode = 0600,
52860+ .proc_handler = &proc_dointvec,
52861+ },
52862+#endif
52863+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52864+ {
52865+ .ctl_name = CTL_UNNUMBERED,
52866+ .procname = "socket_client",
52867+ .data = &grsec_enable_socket_client,
52868+ .maxlen = sizeof(int),
52869+ .mode = 0600,
52870+ .proc_handler = &proc_dointvec,
52871+ },
52872+ {
52873+ .ctl_name = CTL_UNNUMBERED,
52874+ .procname = "socket_client_gid",
52875+ .data = &grsec_socket_client_gid,
52876+ .maxlen = sizeof(int),
52877+ .mode = 0600,
52878+ .proc_handler = &proc_dointvec,
52879+ },
52880+#endif
52881+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52882+ {
52883+ .ctl_name = CTL_UNNUMBERED,
52884+ .procname = "socket_server",
52885+ .data = &grsec_enable_socket_server,
52886+ .maxlen = sizeof(int),
52887+ .mode = 0600,
52888+ .proc_handler = &proc_dointvec,
52889+ },
52890+ {
52891+ .ctl_name = CTL_UNNUMBERED,
52892+ .procname = "socket_server_gid",
52893+ .data = &grsec_socket_server_gid,
52894+ .maxlen = sizeof(int),
52895+ .mode = 0600,
52896+ .proc_handler = &proc_dointvec,
52897+ },
52898+#endif
52899+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52900+ {
52901+ .ctl_name = CTL_UNNUMBERED,
52902+ .procname = "audit_group",
52903+ .data = &grsec_enable_group,
52904+ .maxlen = sizeof(int),
52905+ .mode = 0600,
52906+ .proc_handler = &proc_dointvec,
52907+ },
52908+ {
52909+ .ctl_name = CTL_UNNUMBERED,
52910+ .procname = "audit_gid",
52911+ .data = &grsec_audit_gid,
52912+ .maxlen = sizeof(int),
52913+ .mode = 0600,
52914+ .proc_handler = &proc_dointvec,
52915+ },
52916+#endif
52917+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52918+ {
52919+ .ctl_name = CTL_UNNUMBERED,
52920+ .procname = "audit_chdir",
52921+ .data = &grsec_enable_chdir,
52922+ .maxlen = sizeof(int),
52923+ .mode = 0600,
52924+ .proc_handler = &proc_dointvec,
52925+ },
52926+#endif
52927+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52928+ {
52929+ .ctl_name = CTL_UNNUMBERED,
52930+ .procname = "audit_mount",
52931+ .data = &grsec_enable_mount,
52932+ .maxlen = sizeof(int),
52933+ .mode = 0600,
52934+ .proc_handler = &proc_dointvec,
52935+ },
52936+#endif
52937+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52938+ {
52939+ .ctl_name = CTL_UNNUMBERED,
52940+ .procname = "audit_textrel",
52941+ .data = &grsec_enable_audit_textrel,
52942+ .maxlen = sizeof(int),
52943+ .mode = 0600,
52944+ .proc_handler = &proc_dointvec,
52945+ },
52946+#endif
52947+#ifdef CONFIG_GRKERNSEC_DMESG
52948+ {
52949+ .ctl_name = CTL_UNNUMBERED,
52950+ .procname = "dmesg",
52951+ .data = &grsec_enable_dmesg,
52952+ .maxlen = sizeof(int),
52953+ .mode = 0600,
52954+ .proc_handler = &proc_dointvec,
52955+ },
52956+#endif
52957+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52958+ {
52959+ .ctl_name = CTL_UNNUMBERED,
52960+ .procname = "chroot_findtask",
52961+ .data = &grsec_enable_chroot_findtask,
52962+ .maxlen = sizeof(int),
52963+ .mode = 0600,
52964+ .proc_handler = &proc_dointvec,
52965+ },
52966+#endif
52967+#ifdef CONFIG_GRKERNSEC_RESLOG
52968+ {
52969+ .ctl_name = CTL_UNNUMBERED,
52970+ .procname = "resource_logging",
52971+ .data = &grsec_resource_logging,
52972+ .maxlen = sizeof(int),
52973+ .mode = 0600,
52974+ .proc_handler = &proc_dointvec,
52975+ },
52976+#endif
52977+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52978+ {
52979+ .ctl_name = CTL_UNNUMBERED,
52980+ .procname = "audit_ptrace",
52981+ .data = &grsec_enable_audit_ptrace,
52982+ .maxlen = sizeof(int),
52983+ .mode = 0600,
52984+ .proc_handler = &proc_dointvec,
52985+ },
52986+#endif
52987+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52988+ {
52989+ .ctl_name = CTL_UNNUMBERED,
52990+ .procname = "harden_ptrace",
52991+ .data = &grsec_enable_harden_ptrace,
52992+ .maxlen = sizeof(int),
52993+ .mode = 0600,
52994+ .proc_handler = &proc_dointvec,
52995+ },
52996+#endif
52997+ {
52998+ .ctl_name = CTL_UNNUMBERED,
52999+ .procname = "grsec_lock",
53000+ .data = &grsec_lock,
53001+ .maxlen = sizeof(int),
53002+ .mode = 0600,
53003+ .proc_handler = &proc_dointvec,
53004+ },
53005+#endif
53006+#ifdef CONFIG_GRKERNSEC_ROFS
53007+ {
53008+ .ctl_name = CTL_UNNUMBERED,
53009+ .procname = "romount_protect",
53010+ .data = &grsec_enable_rofs,
53011+ .maxlen = sizeof(int),
53012+ .mode = 0600,
53013+ .proc_handler = &proc_dointvec_minmax,
53014+ .extra1 = &one,
53015+ .extra2 = &one,
53016+ },
53017+#endif
53018+ { .ctl_name = 0 }
53019+};
53020+#endif
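For illustration only (editorial addition, not part of the patch hunks): each entry of grsecurity_table above becomes a file in a "grsecurity" sysctl directory, and gr_handle_sysctl_mod() at the top of this file refuses further writes to that directory once grsec_lock has been set. The registration of the table is not shown in this hunk; assuming it ends up under kernel/, i.e. /proc/sys/kernel/grsecurity/, a typical boot-time configuration step can be sketched from userspace as follows (the path is an assumption, and "grsec_lock" must be written last).

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Write a single value to an assumed /proc/sys/kernel/grsecurity/<name> entry. */
static int write_grsec_sysctl(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_grsec_sysctl("chroot_deny_chroot", "1");
	write_grsec_sysctl("deter_bruteforce", "1");
	/* Must come last: after this, gr_handle_sysctl_mod() returns -EACCES
	 * for any further write under the grsecurity directory. */
	write_grsec_sysctl("grsec_lock", "1");
	return 0;
}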
53021diff -urNp linux-2.6.32.42/grsecurity/grsec_time.c linux-2.6.32.42/grsecurity/grsec_time.c
53022--- linux-2.6.32.42/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53023+++ linux-2.6.32.42/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
53024@@ -0,0 +1,16 @@
53025+#include <linux/kernel.h>
53026+#include <linux/sched.h>
53027+#include <linux/grinternal.h>
53028+#include <linux/module.h>
53029+
53030+void
53031+gr_log_timechange(void)
53032+{
53033+#ifdef CONFIG_GRKERNSEC_TIME
53034+ if (grsec_enable_time)
53035+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53036+#endif
53037+ return;
53038+}
53039+
53040+EXPORT_SYMBOL(gr_log_timechange);
53041diff -urNp linux-2.6.32.42/grsecurity/grsec_tpe.c linux-2.6.32.42/grsecurity/grsec_tpe.c
53042--- linux-2.6.32.42/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53043+++ linux-2.6.32.42/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
53044@@ -0,0 +1,39 @@
53045+#include <linux/kernel.h>
53046+#include <linux/sched.h>
53047+#include <linux/file.h>
53048+#include <linux/fs.h>
53049+#include <linux/grinternal.h>
53050+
53051+extern int gr_acl_tpe_check(void);
53052+
53053+int
53054+gr_tpe_allow(const struct file *file)
53055+{
53056+#ifdef CONFIG_GRKERNSEC
53057+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53058+ const struct cred *cred = current_cred();
53059+
53060+ if (cred->uid && ((grsec_enable_tpe &&
53061+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53062+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53063+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53064+#else
53065+ in_group_p(grsec_tpe_gid)
53066+#endif
53067+ ) || gr_acl_tpe_check()) &&
53068+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53069+ (inode->i_mode & S_IWOTH))))) {
53070+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53071+ return 0;
53072+ }
53073+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53074+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53075+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53076+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53077+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53078+ return 0;
53079+ }
53080+#endif
53081+#endif
53082+ return 1;
53083+}
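For illustration only (editorial addition, not part of the patch hunks): the policy in gr_tpe_allow() above is easier to follow with the kernel plumbing stripped away. The sketch below restates it as a standalone predicate: tpe_applies folds together the grsec_enable_tpe / tpe_gid / tpe_invert / gr_acl_tpe_check() tests, tpe_all stands for grsec_enable_tpe && grsec_enable_tpe_all, and the dir_* arguments stand in for the inode of the containing directory (d_parent). It returns 1 if the exec would be allowed and 0 if TPE would refuse it, matching the kernel helper.

#include <stdio.h>
#include <sys/stat.h>

static int tpe_allow(unsigned int uid, int tpe_applies, int tpe_all,
		     unsigned int dir_uid, mode_t dir_mode)
{
	if (uid == 0)	/* root is never restricted */
		return 1;

	/* Main TPE rule: the containing directory must be owned by root and
	 * writable only by root. */
	if (tpe_applies &&
	    (dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH))))
		return 0;

	/* GRKERNSEC_TPE_ALL: even otherwise trusted users may only execute from
	 * directories owned by root or themselves that are not group/world writable. */
	if (tpe_all &&
	    ((dir_uid && dir_uid != uid) || (dir_mode & (S_IWGRP | S_IWOTH))))
		return 0;

	return 1;
}

int main(void)
{
	printf("%d\n", tpe_allow(1000, 1, 0, 0, 0755));    /* root-owned /usr/bin style dir: allowed */
	printf("%d\n", tpe_allow(1000, 1, 0, 1000, 0755)); /* user-owned dir, TPE applies: denied */
	printf("%d\n", tpe_allow(1000, 0, 1, 1000, 0775)); /* group-writable dir under TPE_ALL: denied */
	return 0;
}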
53084diff -urNp linux-2.6.32.42/grsecurity/grsum.c linux-2.6.32.42/grsecurity/grsum.c
53085--- linux-2.6.32.42/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53086+++ linux-2.6.32.42/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
53087@@ -0,0 +1,61 @@
53088+#include <linux/err.h>
53089+#include <linux/kernel.h>
53090+#include <linux/sched.h>
53091+#include <linux/mm.h>
53092+#include <linux/scatterlist.h>
53093+#include <linux/crypto.h>
53094+#include <linux/gracl.h>
53095+
53096+
53097+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53098+#error "crypto and sha256 must be built into the kernel"
53099+#endif
53100+
53101+int
53102+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53103+{
53104+ char *p;
53105+ struct crypto_hash *tfm;
53106+ struct hash_desc desc;
53107+ struct scatterlist sg;
53108+ unsigned char temp_sum[GR_SHA_LEN];
53109+ volatile int retval = 0;
53110+ volatile int dummy = 0;
53111+ unsigned int i;
53112+
53113+ sg_init_table(&sg, 1);
53114+
53115+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53116+ if (IS_ERR(tfm)) {
53117+ /* should never happen, since sha256 should be built in */
53118+ return 1;
53119+ }
53120+
53121+ desc.tfm = tfm;
53122+ desc.flags = 0;
53123+
53124+ crypto_hash_init(&desc);
53125+
53126+ p = salt;
53127+ sg_set_buf(&sg, p, GR_SALT_LEN);
53128+ crypto_hash_update(&desc, &sg, sg.length);
53129+
53130+ p = entry->pw;
53131+ sg_set_buf(&sg, p, strlen(p));
53132+
53133+ crypto_hash_update(&desc, &sg, sg.length);
53134+
53135+ crypto_hash_final(&desc, temp_sum);
53136+
53137+ memset(entry->pw, 0, GR_PW_LEN);
53138+
53139+ for (i = 0; i < GR_SHA_LEN; i++)
53140+ if (sum[i] != temp_sum[i])
53141+ retval = 1;
53142+ else
53143+ dummy = 1; // waste a cycle
53144+
53145+ crypto_free_hash(tfm);
53146+
53147+ return retval;
53148+}
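For illustration only (editorial addition, not part of the patch hunks): chkpw() above recomputes SHA-256(salt || password) and compares it against the stored sum byte by byte, deliberately touching every byte rather than returning early. The stored salt and sum are normally produced by the gradm userspace tool; the sketch below shows how the same digest could be computed with OpenSSL. The 16-byte salt is an assumption for the example -- the real length is GR_SALT_LEN from gracl.h, which is not part of this hunk.

/* Build with: cc -o grhash grhash.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

int main(void)
{
	unsigned char salt[16] = { 0 };           /* assumed GR_SALT_LEN-sized salt */
	const char *password = "example-password";
	unsigned char sum[SHA256_DIGEST_LENGTH];  /* 32 bytes, matching GR_SHA_LEN */
	SHA256_CTX ctx;
	size_t i;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, salt, sizeof(salt));
	SHA256_Update(&ctx, password, strlen(password));
	SHA256_Final(sum, &ctx);

	for (i = 0; i < sizeof(sum); i++)
		printf("%02x", sum[i]);
	printf("\n");
	return 0;
}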
53149diff -urNp linux-2.6.32.42/grsecurity/Kconfig linux-2.6.32.42/grsecurity/Kconfig
53150--- linux-2.6.32.42/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53151+++ linux-2.6.32.42/grsecurity/Kconfig 2011-07-06 19:57:57.000000000 -0400
53152@@ -0,0 +1,1047 @@
53153+#
53154+# grsecurity configuration
53155+#
53156+
53157+menu "Grsecurity"
53158+
53159+config GRKERNSEC
53160+ bool "Grsecurity"
53161+ select CRYPTO
53162+ select CRYPTO_SHA256
53163+ help
53164+ If you say Y here, you will be able to configure many features
53165+ that will enhance the security of your system. It is highly
53166+ recommended that you say Y here and read through the help
53167+ for each option so that you fully understand the features and
53168+ can evaluate their usefulness for your machine.
53169+
53170+choice
53171+ prompt "Security Level"
53172+ depends on GRKERNSEC
53173+ default GRKERNSEC_CUSTOM
53174+
53175+config GRKERNSEC_LOW
53176+ bool "Low"
53177+ select GRKERNSEC_LINK
53178+ select GRKERNSEC_FIFO
53179+ select GRKERNSEC_EXECVE
53180+ select GRKERNSEC_RANDNET
53181+ select GRKERNSEC_DMESG
53182+ select GRKERNSEC_CHROOT
53183+ select GRKERNSEC_CHROOT_CHDIR
53184+
53185+ help
53186+ If you choose this option, several of the grsecurity options will
53187+ be enabled that will give you greater protection against a number
53188+ of attacks, while assuring that none of your software will have any
53189+ conflicts with the additional security measures. If you run a lot
53190+ of unusual software, or you are having problems with the higher
53191+ security levels, you should say Y here. With this option, the
53192+ following features are enabled:
53193+
53194+ - Linking restrictions
53195+ - FIFO restrictions
53196+ - Enforcing RLIMIT_NPROC on execve
53197+ - Restricted dmesg
53198+ - Enforced chdir("/") on chroot
53199+ - Runtime module disabling
53200+
53201+config GRKERNSEC_MEDIUM
53202+ bool "Medium"
53203+ select PAX
53204+ select PAX_EI_PAX
53205+ select PAX_PT_PAX_FLAGS
53206+ select PAX_HAVE_ACL_FLAGS
53207+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53208+ select GRKERNSEC_CHROOT
53209+ select GRKERNSEC_CHROOT_SYSCTL
53210+ select GRKERNSEC_LINK
53211+ select GRKERNSEC_FIFO
53212+ select GRKERNSEC_EXECVE
53213+ select GRKERNSEC_DMESG
53214+ select GRKERNSEC_RANDNET
53215+ select GRKERNSEC_FORKFAIL
53216+ select GRKERNSEC_TIME
53217+ select GRKERNSEC_SIGNAL
53218+ select GRKERNSEC_CHROOT
53219+ select GRKERNSEC_CHROOT_UNIX
53220+ select GRKERNSEC_CHROOT_MOUNT
53221+ select GRKERNSEC_CHROOT_PIVOT
53222+ select GRKERNSEC_CHROOT_DOUBLE
53223+ select GRKERNSEC_CHROOT_CHDIR
53224+ select GRKERNSEC_CHROOT_MKNOD
53225+ select GRKERNSEC_PROC
53226+ select GRKERNSEC_PROC_USERGROUP
53227+ select PAX_RANDUSTACK
53228+ select PAX_ASLR
53229+ select PAX_RANDMMAP
53230+ select PAX_REFCOUNT if (X86 || SPARC64)
53231+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53232+
53233+ help
53234+ If you say Y here, several features in addition to those included
53235+ in the low additional security level will be enabled. These
53236+ features provide even more security to your system, though in rare
53237+ cases they may be incompatible with very old or poorly written
53238+ software. If you enable this option, make sure that your auth
53239+ service (identd) is running as gid 1001. With this option,
53240+ the following features (in addition to those provided in the
53241+ low additional security level) will be enabled:
53242+
53243+ - Failed fork logging
53244+ - Time change logging
53245+ - Signal logging
53246+ - Deny mounts in chroot
53247+ - Deny double chrooting
53248+ - Deny sysctl writes in chroot
53249+ - Deny mknod in chroot
53250+ - Deny access to abstract AF_UNIX sockets out of chroot
53251+ - Deny pivot_root in chroot
53252+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53253+ - /proc restrictions with special GID set to 10 (usually wheel)
53254+ - Address Space Layout Randomization (ASLR)
53255+ - Prevent exploitation of most refcount overflows
53256+ - Bounds checking of copying between the kernel and userland
53257+
53258+config GRKERNSEC_HIGH
53259+ bool "High"
53260+ select GRKERNSEC_LINK
53261+ select GRKERNSEC_FIFO
53262+ select GRKERNSEC_EXECVE
53263+ select GRKERNSEC_DMESG
53264+ select GRKERNSEC_FORKFAIL
53265+ select GRKERNSEC_TIME
53266+ select GRKERNSEC_SIGNAL
53267+ select GRKERNSEC_CHROOT
53268+ select GRKERNSEC_CHROOT_SHMAT
53269+ select GRKERNSEC_CHROOT_UNIX
53270+ select GRKERNSEC_CHROOT_MOUNT
53271+ select GRKERNSEC_CHROOT_FCHDIR
53272+ select GRKERNSEC_CHROOT_PIVOT
53273+ select GRKERNSEC_CHROOT_DOUBLE
53274+ select GRKERNSEC_CHROOT_CHDIR
53275+ select GRKERNSEC_CHROOT_MKNOD
53276+ select GRKERNSEC_CHROOT_CAPS
53277+ select GRKERNSEC_CHROOT_SYSCTL
53278+ select GRKERNSEC_CHROOT_FINDTASK
53279+ select GRKERNSEC_SYSFS_RESTRICT
53280+ select GRKERNSEC_PROC
53281+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53282+ select GRKERNSEC_HIDESYM
53283+ select GRKERNSEC_BRUTE
53284+ select GRKERNSEC_PROC_USERGROUP
53285+ select GRKERNSEC_KMEM
53286+ select GRKERNSEC_RESLOG
53287+ select GRKERNSEC_RANDNET
53288+ select GRKERNSEC_PROC_ADD
53289+ select GRKERNSEC_CHROOT_CHMOD
53290+ select GRKERNSEC_CHROOT_NICE
53291+ select GRKERNSEC_AUDIT_MOUNT
53292+ select GRKERNSEC_MODHARDEN if (MODULES)
53293+ select GRKERNSEC_HARDEN_PTRACE
53294+ select GRKERNSEC_VM86 if (X86_32)
53295+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53296+ select PAX
53297+ select PAX_RANDUSTACK
53298+ select PAX_ASLR
53299+ select PAX_RANDMMAP
53300+ select PAX_NOEXEC
53301+ select PAX_MPROTECT
53302+ select PAX_EI_PAX
53303+ select PAX_PT_PAX_FLAGS
53304+ select PAX_HAVE_ACL_FLAGS
53305+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53306+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
53307+ select PAX_RANDKSTACK if (X86_TSC && X86)
53308+ select PAX_SEGMEXEC if (X86_32)
53309+ select PAX_PAGEEXEC
53310+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53311+ select PAX_EMUTRAMP if (PARISC)
53312+ select PAX_EMUSIGRT if (PARISC)
53313+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53314+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53315+ select PAX_REFCOUNT if (X86 || SPARC64)
53316+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53317+ help
53318+ If you say Y here, many of the features of grsecurity will be
53319+ enabled, which will protect you against many kinds of attacks
53320+ against your system. The heightened security comes at a cost
53321+ of an increased chance of incompatibilities with rare software
53322+ on your machine. Since this security level enables PaX, you should
53323+ view <http://pax.grsecurity.net> and read about the PaX
53324+ project. While you are there, download chpax and run it on
53325+ binaries that cause problems with PaX. Also remember that
53326+ since the /proc restrictions are enabled, you must run your
53327+ identd as gid 1001. This security level enables the following
53328+ features in addition to those listed in the low and medium
53329+ security levels:
53330+
53331+ - Additional /proc restrictions
53332+ - Chmod restrictions in chroot
53333+ - No signals, ptrace, or viewing of processes outside of chroot
53334+ - Capability restrictions in chroot
53335+ - Deny fchdir out of chroot
53336+ - Priority restrictions in chroot
53337+ - Segmentation-based implementation of PaX
53338+ - Mprotect restrictions
53339+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53340+ - Kernel stack randomization
53341+ - Mount/unmount/remount logging
53342+ - Kernel symbol hiding
53343+ - Prevention of memory exhaustion-based exploits
53344+ - Hardening of module auto-loading
53345+ - Ptrace restrictions
53346+ - Restricted vm86 mode
53347+ - Restricted sysfs/debugfs
53348+ - Active kernel exploit response
53349+
53350+config GRKERNSEC_CUSTOM
53351+ bool "Custom"
53352+ help
53353+ If you say Y here, you will be able to configure every grsecurity
53354+ option, which allows you to enable many more features that aren't
53355+ covered in the basic security levels. These additional features
53356+ include TPE, socket restrictions, and the sysctl system for
53357+ grsecurity. It is advised that you read through the help for
53358+ each option to determine its usefulness in your situation.
53359+
53360+endchoice
53361+
53362+menu "Address Space Protection"
53363+depends on GRKERNSEC
53364+
53365+config GRKERNSEC_KMEM
53366+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53367+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53368+ help
53369+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53370+ be written to via mmap or otherwise to modify the running kernel.
53371+ /dev/port will also not be allowed to be opened. If you have module
53372+ support disabled, enabling this will close up four ways that are
53373+ currently used to insert malicious code into the running kernel.
53374+ Even with all these features enabled, we still highly recommend that
53375+ you use the RBAC system, as it is still possible for an attacker to
53376+ modify the running kernel through privileged I/O granted by ioperm/iopl.
53377+ If you are not using XFree86, you may be able to stop this additional
53378+ case by enabling the 'Disable privileged I/O' option. Though nothing
53379+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53380+ but only to video memory, which is the only writing we allow in this
53381+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
53382+ mapping cannot later be mprotected with PROT_WRITE.
53383+ It is highly recommended that you say Y here if you meet all the
53384+ conditions above.
53385+
53386+config GRKERNSEC_VM86
53387+ bool "Restrict VM86 mode"
53388+ depends on X86_32
53389+
53390+ help
53391+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53392+ make use of a special execution mode on 32bit x86 processors called
53393+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53394+ video cards and will still work with this option enabled. The purpose
53395+ of the option is to prevent exploitation of emulation errors in
53396+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
53397+ Nearly all users should be able to enable this option.
53398+
53399+config GRKERNSEC_IO
53400+ bool "Disable privileged I/O"
53401+ depends on X86
53402+ select RTC_CLASS
53403+ select RTC_INTF_DEV
53404+ select RTC_DRV_CMOS
53405+
53406+ help
53407+ If you say Y here, all ioperm and iopl calls will return an error.
53408+ Ioperm and iopl can be used to modify the running kernel.
53409+ Unfortunately, some programs need this access to operate properly,
53410+ the most notable of which are XFree86 and hwclock. hwclock can be
53411+ remedied by having RTC support in the kernel, so real-time
53412+ clock support is enabled if this option is enabled, to ensure
53413+ that hwclock operates correctly. XFree86 still will not
53414+ operate correctly with this option enabled, so DO NOT CHOOSE Y
53415+ IF YOU USE XFree86. If you use XFree86 and you still want to
53416+ protect your kernel against modification, use the RBAC system.
53417+
53418+config GRKERNSEC_PROC_MEMMAP
53419+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53420+ default y if (PAX_NOEXEC || PAX_ASLR)
53421+ depends on PAX_NOEXEC || PAX_ASLR
53422+ help
53423+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53424+ give no information about the addresses of the task's mappings if
53425+ PaX features that rely on random addresses are enabled on the task.
53426+ If you use PaX it is greatly recommended that you say Y here as it
53427+ closes up a hole that makes the full ASLR useless for suid
53428+ binaries.
53429+
53430+config GRKERNSEC_BRUTE
53431+ bool "Deter exploit bruteforcing"
53432+ help
53433+ If you say Y here, attempts to bruteforce exploits against forking
53434+ daemons such as apache or sshd, as well as against suid/sgid binaries
53435+ will be deterred. When a child of a forking daemon is killed by PaX
53436+ or crashes due to an illegal instruction or other suspicious signal,
53437+ the parent process will be delayed 30 seconds upon every subsequent
53438+ fork until the administrator is able to assess the situation and
53439+ restart the daemon.
53440+ In the suid/sgid case, the attempt is logged, the user has all their
53441+ processes terminated, and they are prevented from executing any further
53442+ processes for 15 minutes.
53443+ It is recommended that you also enable signal logging in the auditing
53444+ section so that logs are generated when a process triggers a suspicious
53445+ signal.
53446+ If the sysctl option is enabled, a sysctl option with name
53447+ "deter_bruteforce" is created.
53448+
53449+config GRKERNSEC_MODHARDEN
53450+ bool "Harden module auto-loading"
53451+ depends on MODULES
53452+ help
53453+ If you say Y here, module auto-loading in response to use of some
53454+ feature implemented by an unloaded module will be restricted to
53455+ root users. Enabling this option helps defend against attacks
53456+ by unprivileged users who abuse the auto-loading behavior to
53457+ cause a vulnerable module to load that is then exploited.
53458+
53459+ If this option prevents a legitimate use of auto-loading for a
53460+ non-root user, the administrator can execute modprobe manually
53461+ with the exact name of the module mentioned in the alert log.
53462+ Alternatively, the administrator can add the module to the list
53463+ of modules loaded at boot by modifying init scripts.
53464+
53465+ Modification of init scripts will most likely be needed on
53466+ Ubuntu servers with encrypted home directory support enabled,
53467+ as the first non-root user logging in will cause the ecb(aes),
53468+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53469+
53470+config GRKERNSEC_HIDESYM
53471+ bool "Hide kernel symbols"
53472+ help
53473+ If you say Y here, getting information on loaded modules, and
53474+ displaying all kernel symbols through a syscall will be restricted
53475+ to users with CAP_SYS_MODULE. For software compatibility reasons,
53476+ /proc/kallsyms will be restricted to the root user. The RBAC
53477+ system can hide that entry even from root.
53478+
53479+ This option also prevents leaking of kernel addresses through
53480+ several /proc entries.
53481+
53482+ Note that this option is only effective provided the following
53483+ conditions are met:
53484+ 1) The kernel using grsecurity is not precompiled by some distribution
53485+ 2) You have also enabled GRKERNSEC_DMESG
53486+ 3) You are using the RBAC system and hiding other files such as your
53487+ kernel image and System.map. Alternatively, enabling this option
53488+ causes the permissions on /boot, /lib/modules, and the kernel
53489+ source directory to change at compile time to prevent
53490+ reading by non-root users.
53491+ If the above conditions are met, this option will aid in providing a
53492+ useful protection against local kernel exploitation of overflows
53493+ and arbitrary read/write vulnerabilities.
53494+
53495+config GRKERNSEC_KERN_LOCKOUT
53496+ bool "Active kernel exploit response"
53497+ depends on X86 || ARM || PPC || SPARC
53498+ help
53499+ If you say Y here, when a PaX alert is triggered due to suspicious
53500+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53501+ or an OOPs occurs due to bad memory accesses, instead of just
53502+ terminating the offending process (and potentially allowing
53503+ a subsequent exploit from the same user), we will take one of two
53504+ actions:
53505+ If the user was root, we will panic the system
53506+ If the user was non-root, we will log the attempt, terminate
53507+ all processes owned by the user, then prevent them from creating
53508+ any new processes until the system is restarted
53509+ This deters repeated kernel exploitation/bruteforcing attempts
53510+ and is useful for later forensics.
53511+
53512+endmenu
53513+menu "Role Based Access Control Options"
53514+depends on GRKERNSEC
53515+
53516+config GRKERNSEC_RBAC_DEBUG
53517+ bool
53518+
53519+config GRKERNSEC_NO_RBAC
53520+ bool "Disable RBAC system"
53521+ help
53522+ If you say Y here, the /dev/grsec device will be removed from the kernel,
53523+ preventing the RBAC system from being enabled. You should only say Y
53524+ here if you have no intention of using the RBAC system, so as to prevent
53525+ an attacker with root access from misusing the RBAC system to hide files
53526+ and processes when loadable module support and /dev/[k]mem have been
53527+ locked down.
53528+
53529+config GRKERNSEC_ACL_HIDEKERN
53530+ bool "Hide kernel processes"
53531+ help
53532+ If you say Y here, all kernel threads will be hidden to all
53533+ processes but those whose subject has the "view hidden processes"
53534+ flag.
53535+
53536+config GRKERNSEC_ACL_MAXTRIES
53537+ int "Maximum tries before password lockout"
53538+ default 3
53539+ help
53540+ This option enforces the maximum number of times a user can attempt
53541+ to authorize themselves with the grsecurity RBAC system before being
53542+ denied the ability to attempt authorization again for a specified time.
53543+ The lower the number, the harder it will be to brute-force a password.
53544+
53545+config GRKERNSEC_ACL_TIMEOUT
53546+ int "Time to wait after max password tries, in seconds"
53547+ default 30
53548+ help
53549+ This option specifies the time the user must wait after attempting to
53550+ authorize to the RBAC system with the maximum number of invalid
53551+ passwords. The higher the number, the harder it will be to brute-force
53552+ a password.
53553+
53554+endmenu
53555+menu "Filesystem Protections"
53556+depends on GRKERNSEC
53557+
53558+config GRKERNSEC_PROC
53559+ bool "Proc restrictions"
53560+ help
53561+ If you say Y here, the permissions of the /proc filesystem
53562+ will be altered to enhance system security and privacy. You MUST
53563+ choose either a user only restriction or a user and group restriction.
53564+ Depending upon the option you choose, you can either restrict users to
53565+ see only the processes they themselves run, or choose a group that can
53566+ view all processes and files normally restricted to root if you choose
53567+ the "restrict to user only" option. NOTE: If you're running identd as
53568+ a non-root user, you will have to run it as the group you specify here.
53569+
53570+config GRKERNSEC_PROC_USER
53571+ bool "Restrict /proc to user only"
53572+ depends on GRKERNSEC_PROC
53573+ help
53574+ If you say Y here, non-root users will only be able to view their own
53575+ processes, and will be restricted from viewing network-related
53576+ information and from viewing kernel symbol and module information.
53577+
53578+config GRKERNSEC_PROC_USERGROUP
53579+ bool "Allow special group"
53580+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53581+ help
53582+ If you say Y here, you will be able to select a group that will be
53583+ able to view all processes and network-related information. If you've
53584+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53585+ remain hidden. This option is useful if you want to run identd as
53586+ a non-root user.
53587+
53588+config GRKERNSEC_PROC_GID
53589+ int "GID for special group"
53590+ depends on GRKERNSEC_PROC_USERGROUP
53591+ default 1001
53592+
53593+config GRKERNSEC_PROC_ADD
53594+ bool "Additional restrictions"
53595+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53596+ help
53597+ If you say Y here, additional restrictions will be placed on
53598+ /proc that keep normal users from viewing device information and
53599+ slabinfo information that could be useful for exploits.
53600+
53601+config GRKERNSEC_LINK
53602+ bool "Linking restrictions"
53603+ help
53604+ If you say Y here, /tmp race exploits will be prevented, since users
53605+ will no longer be able to follow symlinks owned by other users in
53606+ world-writable +t directories (e.g. /tmp), unless the owner of the
53607+ symlink is the owner of the directory. Users will also not be
53608+ able to hardlink to files they do not own. If the sysctl option is
53609+ enabled, a sysctl option with name "linking_restrictions" is created.
53610+
53611+config GRKERNSEC_FIFO
53612+ bool "FIFO restrictions"
53613+ help
53614+ If you say Y here, users will not be able to write to FIFOs they don't
53615+ own in world-writable +t directories (e.g. /tmp), unless the owner of
53616+ the FIFO is also the owner of the directory it's held in. If the sysctl
53617+ option is enabled, a sysctl option with name "fifo_restrictions" is
53618+ created.
53619+
53620+config GRKERNSEC_SYSFS_RESTRICT
53621+ bool "Sysfs/debugfs restriction"
53622+ depends on SYSFS
53623+ help
53624+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53625+ any filesystem normally mounted under it (e.g. debugfs) will only
53626+ be accessible by root. These filesystems generally provide access
53627+ to hardware and debug information that isn't appropriate for unprivileged
53628+ users of the system. Sysfs and debugfs have also become a large source
53629+ of new vulnerabilities, ranging from infoleaks to local compromise.
53630+ There has been very little oversight with an eye toward security involved
53631+ in adding new exporters of information to these filesystems, so their
53632+ use is discouraged.
53633+ This option is equivalent to a chmod 0700 of the mount paths.
53634+
53635+config GRKERNSEC_ROFS
53636+ bool "Runtime read-only mount protection"
53637+ help
53638+ If you say Y here, a sysctl option with name "romount_protect" will
53639+ be created. By setting this option to 1 at runtime, filesystems
53640+ will be protected in the following ways:
53641+ * No new writable mounts will be allowed
53642+ * Existing read-only mounts won't be able to be remounted read/write
53643+ * Write operations will be denied on all block devices
53644+ This option acts independently of grsec_lock: once it is set to 1,
53645+ it cannot be turned off. Therefore, please be mindful of the resulting
53646+ behavior if this option is enabled in an init script on a read-only
53647+ filesystem. This feature is mainly intended for secure embedded systems.
53648+
53649+config GRKERNSEC_CHROOT
53650+ bool "Chroot jail restrictions"
53651+ help
53652+ If you say Y here, you will be able to choose several options that will
53653+ make breaking out of a chrooted jail much more difficult. If you
53654+ encounter no software incompatibilities with the following options, it
53655+ is recommended that you enable each one.
53656+
53657+config GRKERNSEC_CHROOT_MOUNT
53658+ bool "Deny mounts"
53659+ depends on GRKERNSEC_CHROOT
53660+ help
53661+ If you say Y here, processes inside a chroot will not be able to
53662+ mount or remount filesystems. If the sysctl option is enabled, a
53663+ sysctl option with name "chroot_deny_mount" is created.
53664+
53665+config GRKERNSEC_CHROOT_DOUBLE
53666+ bool "Deny double-chroots"
53667+ depends on GRKERNSEC_CHROOT
53668+ help
53669+ If you say Y here, processes inside a chroot will not be able to chroot
53670+ again outside the chroot. This is a widely used method of breaking
53671+ out of a chroot jail and should not be allowed. If the sysctl
53672+ option is enabled, a sysctl option with name
53673+ "chroot_deny_chroot" is created.
53674+
53675+config GRKERNSEC_CHROOT_PIVOT
53676+ bool "Deny pivot_root in chroot"
53677+ depends on GRKERNSEC_CHROOT
53678+ help
53679+ If you say Y here, processes inside a chroot will not be able to use
53680+ a function called pivot_root() that was introduced in Linux 2.3.41. It
53681+ works similarly to chroot in that it changes the root filesystem. This
53682+ function could be misused in a chrooted process to attempt to break out
53683+ of the chroot, and therefore should not be allowed. If the sysctl
53684+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
53685+ created.
53686+
53687+config GRKERNSEC_CHROOT_CHDIR
53688+ bool "Enforce chdir(\"/\") on all chroots"
53689+ depends on GRKERNSEC_CHROOT
53690+ help
53691+ If you say Y here, the current working directory of all newly-chrooted
53692+ applications will be set to the root directory of the chroot.
53693+ The man page on chroot(2) states:
53694+ Note that this call does not change the current working
53695+ directory, so that `.' can be outside the tree rooted at
53696+ `/'. In particular, the super-user can escape from a
53697+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53698+
53699+ It is recommended that you say Y here, since it's not known to break
53700+ any software. If the sysctl option is enabled, a sysctl option with
53701+ name "chroot_enforce_chdir" is created.
53702+
53703+config GRKERNSEC_CHROOT_CHMOD
53704+ bool "Deny (f)chmod +s"
53705+ depends on GRKERNSEC_CHROOT
53706+ help
53707+ If you say Y here, processes inside a chroot will not be able to chmod
53708+ or fchmod files to make them have suid or sgid bits. This protects
53709+ against another published method of breaking a chroot. If the sysctl
53710+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
53711+ created.
53712+
53713+config GRKERNSEC_CHROOT_FCHDIR
53714+ bool "Deny fchdir out of chroot"
53715+ depends on GRKERNSEC_CHROOT
53716+ help
53717+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
53718+ to a file descriptor of the chrooting process that points to a directory
53719+ outside the filesystem will be stopped. If the sysctl option
53720+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53721+
53722+config GRKERNSEC_CHROOT_MKNOD
53723+ bool "Deny mknod"
53724+ depends on GRKERNSEC_CHROOT
53725+ help
53726+ If you say Y here, processes inside a chroot will not be allowed to
53727+ mknod. The problem with using mknod inside a chroot is that it
53728+ would allow an attacker to create a device entry that is the same
53729+ as one on the physical root of your system, which could be anything
53730+ from the console device to a device for your hard drive (which
53731+ they could then use to wipe the drive or steal data). It is recommended
53732+ that you say Y here, unless you run into software incompatibilities.
53733+ If the sysctl option is enabled, a sysctl option with name
53734+ "chroot_deny_mknod" is created.
53735+
53736+config GRKERNSEC_CHROOT_SHMAT
53737+ bool "Deny shmat() out of chroot"
53738+ depends on GRKERNSEC_CHROOT
53739+ help
53740+ If you say Y here, processes inside a chroot will not be able to attach
53741+ to shared memory segments that were created outside of the chroot jail.
53742+ It is recommended that you say Y here. If the sysctl option is enabled,
53743+ a sysctl option with name "chroot_deny_shmat" is created.
53744+
53745+config GRKERNSEC_CHROOT_UNIX
53746+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
53747+ depends on GRKERNSEC_CHROOT
53748+ help
53749+ If you say Y here, processes inside a chroot will not be able to
53750+ connect to abstract (meaning not belonging to a filesystem) Unix
53751+ domain sockets that were bound outside of a chroot. It is recommended
53752+ that you say Y here. If the sysctl option is enabled, a sysctl option
53753+ with name "chroot_deny_unix" is created.
53754+
53755+config GRKERNSEC_CHROOT_FINDTASK
53756+ bool "Protect outside processes"
53757+ depends on GRKERNSEC_CHROOT
53758+ help
53759+ If you say Y here, processes inside a chroot will not be able to
53760+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53761+ getsid, or view any process outside of the chroot. If the sysctl
53762+ option is enabled, a sysctl option with name "chroot_findtask" is
53763+ created.
53764+
53765+config GRKERNSEC_CHROOT_NICE
53766+ bool "Restrict priority changes"
53767+ depends on GRKERNSEC_CHROOT
53768+ help
53769+ If you say Y here, processes inside a chroot will not be able to raise
53770+ the priority of processes in the chroot, or alter the priority of
53771+ processes outside the chroot. This provides more security than simply
53772+ removing CAP_SYS_NICE from the process' capability set. If the
53773+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53774+ is created.
53775+
53776+config GRKERNSEC_CHROOT_SYSCTL
53777+ bool "Deny sysctl writes"
53778+ depends on GRKERNSEC_CHROOT
53779+ help
53780+ If you say Y here, an attacker in a chroot will not be able to
53781+ write to sysctl entries, either by sysctl(2) or through a /proc
53782+ interface. It is strongly recommended that you say Y here. If the
53783+ sysctl option is enabled, a sysctl option with name
53784+ "chroot_deny_sysctl" is created.
53785+
53786+config GRKERNSEC_CHROOT_CAPS
53787+ bool "Capability restrictions"
53788+ depends on GRKERNSEC_CHROOT
53789+ help
53790+ If you say Y here, the capabilities on all root processes within a
53791+ chroot jail will be lowered to stop module insertion, raw i/o,
53792+ system and net admin tasks, rebooting the system, modifying immutable
53793+ files, modifying IPC owned by another, and changing the system time.
53794+ This is left an option because it can break some apps. Disable this
53795+ if your chrooted apps are having problems performing those kinds of
53796+ tasks. If the sysctl option is enabled, a sysctl option with
53797+ name "chroot_caps" is created.
53798+
53799+endmenu
53800+menu "Kernel Auditing"
53801+depends on GRKERNSEC
53802+
53803+config GRKERNSEC_AUDIT_GROUP
53804+ bool "Single group for auditing"
53805+ help
53806+ If you say Y here, the exec, chdir, and (un)mount logging features
53807+ will only operate on a group you specify. This option is recommended
53808+ if you only want to watch certain users instead of having a large
53809+ amount of logs from the entire system. If the sysctl option is enabled,
53810+ a sysctl option with name "audit_group" is created.
53811+
53812+config GRKERNSEC_AUDIT_GID
53813+ int "GID for auditing"
53814+ depends on GRKERNSEC_AUDIT_GROUP
53815+ default 1007
53816+
53817+config GRKERNSEC_EXECLOG
53818+ bool "Exec logging"
53819+ help
53820+ If you say Y here, all execve() calls will be logged (since the
53821+ other exec*() calls are frontends to execve(), all execution
53822+ will be logged). Useful for shell-servers that like to keep track
53823+ of their users. If the sysctl option is enabled, a sysctl option with
53824+ name "exec_logging" is created.
53825+ WARNING: This option when enabled will produce a LOT of logs, especially
53826+ on an active system.
53827+
53828+config GRKERNSEC_RESLOG
53829+ bool "Resource logging"
53830+ help
53831+ If you say Y here, all attempts to overstep resource limits will
53832+ be logged with the resource name, the requested size, and the current
53833+ limit. It is highly recommended that you say Y here. If the sysctl
53834+ option is enabled, a sysctl option with name "resource_logging" is
53835+ created. If the RBAC system is enabled, the sysctl value is ignored.
53836+
53837+config GRKERNSEC_CHROOT_EXECLOG
53838+ bool "Log execs within chroot"
53839+ help
53840+ If you say Y here, all executions inside a chroot jail will be logged
53841+ to syslog. This can cause a large amount of logs if certain
53842+ applications (eg. djb's daemontools) are installed on the system, and
53843+ is therefore left as an option. If the sysctl option is enabled, a
53844+ sysctl option with name "chroot_execlog" is created.
53845+
53846+config GRKERNSEC_AUDIT_PTRACE
53847+ bool "Ptrace logging"
53848+ help
53849+ If you say Y here, all attempts to attach to a process via ptrace
53850+ will be logged. If the sysctl option is enabled, a sysctl option
53851+ with name "audit_ptrace" is created.
53852+
53853+config GRKERNSEC_AUDIT_CHDIR
53854+ bool "Chdir logging"
53855+ help
53856+ If you say Y here, all chdir() calls will be logged. If the sysctl
53857+ option is enabled, a sysctl option with name "audit_chdir" is created.
53858+
53859+config GRKERNSEC_AUDIT_MOUNT
53860+ bool "(Un)Mount logging"
53861+ help
53862+ If you say Y here, all mounts and unmounts will be logged. If the
53863+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53864+ created.
53865+
53866+config GRKERNSEC_SIGNAL
53867+ bool "Signal logging"
53868+ help
53869+ If you say Y here, certain important signals will be logged, such as
53870+ SIGSEGV, which will as a result inform you when an error occurred in a
53871+ program, which in some cases could indicate a possible exploit attempt.
53872+ If the sysctl option is enabled, a sysctl option with name
53873+ "signal_logging" is created.
53874+
53875+config GRKERNSEC_FORKFAIL
53876+ bool "Fork failure logging"
53877+ help
53878+ If you say Y here, all failed fork() attempts will be logged.
53879+ This could suggest a fork bomb, or someone attempting to overstep
53880+ their process limit. If the sysctl option is enabled, a sysctl option
53881+ with name "forkfail_logging" is created.
53882+
53883+config GRKERNSEC_TIME
53884+ bool "Time change logging"
53885+ help
53886+ If you say Y here, any changes of the system clock will be logged.
53887+ If the sysctl option is enabled, a sysctl option with name
53888+ "timechange_logging" is created.
53889+
53890+config GRKERNSEC_PROC_IPADDR
53891+ bool "/proc/<pid>/ipaddr support"
53892+ help
53893+ If you say Y here, a new entry will be added to each /proc/<pid>
53894+ directory that contains the IP address of the person using the task.
53895+ The IP is carried across local TCP and AF_UNIX stream sockets.
53896+ This information can be useful for IDS/IPSes to perform remote response
53897+ to a local attack. The entry is readable by only the owner of the
53898+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53899+ the RBAC system), and thus does not create privacy concerns.
53900+
53901+config GRKERNSEC_RWXMAP_LOG
53902+ bool 'Denied RWX mmap/mprotect logging'
53903+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53904+ help
53905+ If you say Y here, calls to mmap() and mprotect() with explicit
53906+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53907+ denied by the PAX_MPROTECT feature. If the sysctl option is
53908+ enabled, a sysctl option with name "rwxmap_logging" is created.
53909+
53910+config GRKERNSEC_AUDIT_TEXTREL
53911+ bool 'ELF text relocations logging (READ HELP)'
53912+ depends on PAX_MPROTECT
53913+ help
53914+ If you say Y here, text relocations will be logged with the filename
53915+ of the offending library or binary. The purpose of the feature is
53916+ to help Linux distribution developers get rid of libraries and
53917+ binaries that need text relocations which hinder the future progress
53918+ of PaX. Only Linux distribution developers should say Y here, and
53919+ never on a production machine, as this option creates an information
53920+ leak that could aid an attacker in defeating the randomization of
53921+ a single memory region. If the sysctl option is enabled, a sysctl
53922+ option with name "audit_textrel" is created.
53923+
53924+endmenu
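For illustration only (editorial addition, not part of the patch hunks): the GRKERNSEC_PROC_IPADDR option described in the auditing menu above adds a per-task /proc/<pid>/ipaddr entry readable by the task's owner. Assuming the option is enabled on the running kernel, the entry reads like any other procfs file:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], addr[64];
	FILE *f;

	/* Default to the calling process; pass a pid to inspect another task you own. */
	snprintf(path, sizeof(path), "/proc/%s/ipaddr", argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(addr, sizeof(addr), f))
		printf("%s", addr);
	fclose(f);
	return 0;
}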
53925+
53926+menu "Executable Protections"
53927+depends on GRKERNSEC
53928+
53929+config GRKERNSEC_EXECVE
53930+ bool "Enforce RLIMIT_NPROC on execs"
53931+ help
53932+ If you say Y here, users with a resource limit on processes will
53933+ have the value checked during execve() calls. The current system
53934+ only checks the system limit during fork() calls. If the sysctl option
53935+ is enabled, a sysctl option with name "execve_limiting" is created.
53936+
53937+config GRKERNSEC_DMESG
53938+ bool "Dmesg(8) restriction"
53939+ help
53940+ If you say Y here, non-root users will not be able to use dmesg(8)
53941+ to view up to the last 4kb of messages in the kernel's log buffer.
53942+ The kernel's log buffer often contains kernel addresses and other
53943+ identifying information useful to an attacker in fingerprinting a
53944+ system for a targeted exploit.
53945+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53946+ created.
53947+
53948+config GRKERNSEC_HARDEN_PTRACE
53949+ bool "Deter ptrace-based process snooping"
53950+ help
53951+ If you say Y here, TTY sniffers and other malicious monitoring
53952+ programs implemented through ptrace will be defeated. If you
53953+ have been using the RBAC system, this option has already been
53954+ enabled for several years for all users, with the ability to make
53955+ fine-grained exceptions.
53956+
53957+ This option only affects the ability of non-root users to ptrace
53958+ processes that are not a descendant of the ptracing process.
53959+ This means that strace ./binary and gdb ./binary will still work,
53960+ but attaching to arbitrary processes will not. If the sysctl
53961+ option is enabled, a sysctl option with name "harden_ptrace" is
53962+ created.
53963+
53964+config GRKERNSEC_TPE
53965+ bool "Trusted Path Execution (TPE)"
53966+ help
53967+ If you say Y here, you will be able to choose a gid to add to the
53968+ supplementary groups of users you want to mark as "untrusted."
53969+ These users will not be able to execute any files that are not in
53970+ root-owned directories writable only by root. If the sysctl option
53971+ is enabled, a sysctl option with name "tpe" is created.
53972+
53973+config GRKERNSEC_TPE_ALL
53974+ bool "Partially restrict all non-root users"
53975+ depends on GRKERNSEC_TPE
53976+ help
53977+ If you say Y here, all non-root users will be covered under
53978+ a weaker TPE restriction. This is separate from, and in addition to,
53979+ the main TPE options that you have selected elsewhere. Thus, if a
53980+ "trusted" GID is chosen, this restriction applies to even that GID.
53981+ Under this restriction, all non-root users will only be allowed to
53982+ execute files in directories they own that are not group or
53983+ world-writable, or in directories owned by root and writable only by
53984+ root. If the sysctl option is enabled, a sysctl option with name
53985+ "tpe_restrict_all" is created.
53986+
53987+config GRKERNSEC_TPE_INVERT
53988+ bool "Invert GID option"
53989+ depends on GRKERNSEC_TPE
53990+ help
53991+ If you say Y here, the group you specify in the TPE configuration will
53992+ decide what group TPE restrictions will be *disabled* for. This
53993+ option is useful if you want TPE restrictions to be applied to most
53994+ users on the system. If the sysctl option is enabled, a sysctl option
53995+ with name "tpe_invert" is created. Unlike other sysctl options, this
53996+ entry will default to on for backward-compatibility.
53997+
53998+config GRKERNSEC_TPE_GID
53999+ int "GID for untrusted users"
54000+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54001+ default 1005
54002+ help
54003+ Setting this GID determines what group TPE restrictions will be
54004+ *enabled* for. If the sysctl option is enabled, a sysctl option
54005+ with name "tpe_gid" is created.
54006+
54007+config GRKERNSEC_TPE_GID
54008+ int "GID for trusted users"
54009+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54010+ default 1005
54011+ help
54012+ Setting this GID determines what group TPE restrictions will be
54013+ *disabled* for. If the sysctl option is enabled, a sysctl option
54014+ with name "tpe_gid" is created.
54015+
54016+endmenu
54017+menu "Network Protections"
54018+depends on GRKERNSEC
54019+
54020+config GRKERNSEC_RANDNET
54021+ bool "Larger entropy pools"
54022+ help
54023+ If you say Y here, the entropy pools used for many features of Linux
54024+ and grsecurity will be doubled in size. Since several grsecurity
54025+ features use additional randomness, it is recommended that you say Y
54026+ here. Saying Y here has a similar effect as modifying
54027+ here. Saying Y here has a similar effect to modifying
54028+
54029+config GRKERNSEC_BLACKHOLE
54030+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54031+ help
54032+ If you say Y here, neither TCP resets nor ICMP
54033+ destination-unreachable packets will be sent in response to packets
54034+ sent to ports for which no associated listening process exists.
54035+ This feature supports both IPV4 and IPV6 and exempts the
54036+ loopback interface from blackholing. Enabling this feature
54037+ makes a host more resilient to DoS attacks and reduces network
54038+ visibility against scanners.
54039+
54040+ The blackhole feature as-implemented is equivalent to the FreeBSD
54041+ blackhole feature, as it prevents RST responses to all packets, not
54042+ just SYNs. Under most application behavior this causes no
54043+ problems, but applications (like haproxy) may not close certain
54044+ connections in a way that cleanly terminates them on the remote
54045+ end, leaving the remote host in LAST_ACK state. Because of this
54046+ side-effect and to prevent intentional LAST_ACK DoSes, this
54047+ feature also adds automatic mitigation against such attacks.
54048+ The mitigation drastically reduces the amount of time a socket
54049+ can spend in LAST_ACK state. If you're using haproxy and not
54050+ all servers it connects to have this option enabled, consider
54051+ disabling this feature on the haproxy host.
54052+
54053+ If the sysctl option is enabled, two sysctl options with names
54054+ "ip_blackhole" and "lastack_retries" will be created.
54055+ While "ip_blackhole" takes the standard zero/non-zero on/off
54056+ toggle, "lastack_retries" uses the same kinds of values as
54057+ "tcp_retries1" and "tcp_retries2". The default value of 4
54058+ prevents a socket from lasting more than 45 seconds in LAST_ACK
54059+ state.
54060+
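 A short sketch of the two entries named above, assuming the sysctl
 build; the value written to "lastack_retries" is simply the documented
 default:

   echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole      # enable blackholing
   echo 4 > /proc/sys/kernel/grsecurity/lastack_retries   # default, ~45s max in LAST_ACK
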
54061+config GRKERNSEC_SOCKET
54062+ bool "Socket restrictions"
54063+ help
54064+ If you say Y here, you will be able to choose from several options.
54065+ If you assign a GID on your system and add it to the supplementary
54066+ groups of users you want to restrict socket access to, this patch
54067+ will apply up to three restrictions, based on the option(s) you choose.
54068+
54069+config GRKERNSEC_SOCKET_ALL
54070+ bool "Deny any sockets to group"
54071+ depends on GRKERNSEC_SOCKET
54072+ help
54073+ If you say Y here, you will be able to choose a GID whose users will
54074+ be unable to connect to other hosts from your machine or run server
54075+ applications from your machine. If the sysctl option is enabled, a
54076+ sysctl option with name "socket_all" is created.
54077+
54078+config GRKERNSEC_SOCKET_ALL_GID
54079+ int "GID to deny all sockets for"
54080+ depends on GRKERNSEC_SOCKET_ALL
54081+ default 1004
54082+ help
54083+ Here you can choose the GID to disable socket access for. Remember to
54084+ add the users you want socket access disabled for to the GID
54085+ specified here. If the sysctl option is enabled, a sysctl option
54086+ with name "socket_all_gid" is created.
54087+
54088+config GRKERNSEC_SOCKET_CLIENT
54089+ bool "Deny client sockets to group"
54090+ depends on GRKERNSEC_SOCKET
54091+ help
54092+ If you say Y here, you will be able to choose a GID whose users will
54093+ be unable to connect to other hosts from your machine, but will be
54094+ able to run servers. If this option is enabled, all users in the group
54095+ you specify will have to use passive mode when initiating ftp transfers
54096+ from the shell on your machine. If the sysctl option is enabled, a
54097+ sysctl option with name "socket_client" is created.
54098+
54099+config GRKERNSEC_SOCKET_CLIENT_GID
54100+ int "GID to deny client sockets for"
54101+ depends on GRKERNSEC_SOCKET_CLIENT
54102+ default 1003
54103+ help
54104+ Here you can choose the GID to disable client socket access for.
54105+ Remember to add the users you want client socket access disabled for to
54106+ the GID specified here. If the sysctl option is enabled, a sysctl
54107+ option with name "socket_client_gid" is created.
54108+
54109+config GRKERNSEC_SOCKET_SERVER
54110+ bool "Deny server sockets to group"
54111+ depends on GRKERNSEC_SOCKET
54112+ help
54113+ If you say Y here, you will be able to choose a GID whose users will
54114+ be unable to run server applications from your machine. If the sysctl
54115+ option is enabled, a sysctl option with name "socket_server" is created.
54116+
54117+config GRKERNSEC_SOCKET_SERVER_GID
54118+ int "GID to deny server sockets for"
54119+ depends on GRKERNSEC_SOCKET_SERVER
54120+ default 1002
54121+ help
54122+ Here you can choose the GID to disable server socket access for.
54123+ Remember to add the users you want server socket access disabled for to
54124+ the GID specified here. If the sysctl option is enabled, a sysctl
54125+ option with name "socket_server_gid" is created.
54126+
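 The three socket options above follow the same GID pattern; a sketch
 using the documented default GIDs, with placeholder group and user
 names:

   # deny all socket access to members of GID 1004
   groupadd -g 1004 nosocket            # placeholder group name
   usermod -aG nosocket someuser        # placeholder user name
   echo 1004 > /proc/sys/kernel/grsecurity/socket_all_gid
   echo 1 > /proc/sys/kernel/grsecurity/socket_all
   # the client-only and server-only variants use socket_client(_gid)
   # and socket_server(_gid), with default GIDs 1003 and 1002
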
54127+endmenu
54128+menu "Sysctl support"
54129+depends on GRKERNSEC && SYSCTL
54130+
54131+config GRKERNSEC_SYSCTL
54132+ bool "Sysctl support"
54133+ help
54134+ If you say Y here, you will be able to change the options that
54135+ grsecurity runs with at bootup, without having to recompile your
54136+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54137+ to enable (1) or disable (0) various features. All the sysctl entries
54138+ are mutable until the "grsec_lock" entry is set to a non-zero value.
54139+ All features enabled in the kernel configuration are disabled at boot
54140+ if you do not say Y to the "Turn on features by default" option.
54141+ All options should be set at startup, and the grsec_lock entry should
54142+ be set to a non-zero value after all the options are set.
54143+ *THIS IS EXTREMELY IMPORTANT*
54144+
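 A sketch of the boot-time sequence the help text above calls for: set
 every desired entry first, then lock the tree so it can no longer be
 changed:

   # in an init script, after all grsecurity entries have been set
   echo 1 > /proc/sys/kernel/grsecurity/grsec_lock
   # from here on the entries are immutable until reboot
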
54145+config GRKERNSEC_SYSCTL_DISTRO
54146+ bool "Extra sysctl support for distro makers (READ HELP)"
54147+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54148+ help
54149+ If you say Y here, additional sysctl options will be created
54150+ for features that affect processes running as root. Therefore,
54151+ it is critical when using this option that the grsec_lock entry be
54152+ enabled after boot. Only distros that ship prebuilt kernel packages
54153+ with this option enabled, and that can ensure grsec_lock is set
54154+ after boot, should use this option.
54155+ *Failure to set grsec_lock after boot makes all grsec features
54156+ this option covers useless*
54157+
54158+ Currently this option creates the following sysctl entries:
54159+ "Disable Privileged I/O": "disable_priv_io"
54160+
54161+config GRKERNSEC_SYSCTL_ON
54162+ bool "Turn on features by default"
54163+ depends on GRKERNSEC_SYSCTL
54164+ help
54165+ If you say Y here, the features enabled in the kernel configuration
54166+ will be turned on at boot time, instead of being left disabled until
54167+ you enable them via sysctl. It is recommended you say Y here unless
54168+ there is some reason you would want all sysctl-tunable features to
54169+ be disabled by default. As mentioned elsewhere, it is important
54170+ to enable the grsec_lock entry once you have finished modifying
54171+ the sysctl entries.
54172+
54173+endmenu
54174+menu "Logging Options"
54175+depends on GRKERNSEC
54176+
54177+config GRKERNSEC_FLOODTIME
54178+ int "Seconds in between log messages (minimum)"
54179+ default 10
54180+ help
54181+ This option allows you to enforce the minimum number of seconds between
54182+ grsecurity log messages. The default should be suitable for most
54183+ people; however, if you choose to change it, choose a value small enough
54184+ to allow informative logs to be produced, but large enough to
54185+ prevent flooding.
54186+
54187+config GRKERNSEC_FLOODBURST
54188+ int "Number of messages in a burst (maximum)"
54189+ default 4
54190+ help
54191+ This option allows you to choose the maximum number of messages allowed
54192+ within the flood time interval you chose in a separate option. The
54193+ default should be suitable for most people; however, if you find that
54194+ many of your logs are being interpreted as flooding, you may want to
54195+ raise this value.
54196+
54197+endmenu
54198+
54199+endmenu
54200diff -urNp linux-2.6.32.42/grsecurity/Makefile linux-2.6.32.42/grsecurity/Makefile
54201--- linux-2.6.32.42/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54202+++ linux-2.6.32.42/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
54203@@ -0,0 +1,33 @@
54204+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54205+# during 2001-2009 it has been completely redesigned by Brad Spengler
54206+# into an RBAC system
54207+#
54208+# All code in this directory and various hooks inserted throughout the kernel
54209+# are copyright Brad Spengler - Open Source Security, Inc., and released
54210+# under the GPL v2 or higher
54211+
54212+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54213+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
54214+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54215+
54216+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54217+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54218+ gracl_learn.o grsec_log.o
54219+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54220+
54221+ifdef CONFIG_NET
54222+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54223+endif
54224+
54225+ifndef CONFIG_GRKERNSEC
54226+obj-y += grsec_disabled.o
54227+endif
54228+
54229+ifdef CONFIG_GRKERNSEC_HIDESYM
54230+extra-y := grsec_hidesym.o
54231+$(obj)/grsec_hidesym.o:
54232+ @-chmod -f 500 /boot
54233+ @-chmod -f 500 /lib/modules
54234+ @-chmod -f 700 .
54235+ @echo ' grsec: protected kernel image paths'
54236+endif
54237diff -urNp linux-2.6.32.42/include/acpi/acpi_drivers.h linux-2.6.32.42/include/acpi/acpi_drivers.h
54238--- linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
54239+++ linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
54240@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
54241 Dock Station
54242 -------------------------------------------------------------------------- */
54243 struct acpi_dock_ops {
54244- acpi_notify_handler handler;
54245- acpi_notify_handler uevent;
54246+ const acpi_notify_handler handler;
54247+ const acpi_notify_handler uevent;
54248 };
54249
54250 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
54251@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
54252 extern int register_dock_notifier(struct notifier_block *nb);
54253 extern void unregister_dock_notifier(struct notifier_block *nb);
54254 extern int register_hotplug_dock_device(acpi_handle handle,
54255- struct acpi_dock_ops *ops,
54256+ const struct acpi_dock_ops *ops,
54257 void *context);
54258 extern void unregister_hotplug_dock_device(acpi_handle handle);
54259 #else
54260@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
54261 {
54262 }
54263 static inline int register_hotplug_dock_device(acpi_handle handle,
54264- struct acpi_dock_ops *ops,
54265+ const struct acpi_dock_ops *ops,
54266 void *context)
54267 {
54268 return -ENODEV;
54269diff -urNp linux-2.6.32.42/include/asm-generic/atomic-long.h linux-2.6.32.42/include/asm-generic/atomic-long.h
54270--- linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
54271+++ linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
54272@@ -22,6 +22,12 @@
54273
54274 typedef atomic64_t atomic_long_t;
54275
54276+#ifdef CONFIG_PAX_REFCOUNT
54277+typedef atomic64_unchecked_t atomic_long_unchecked_t;
54278+#else
54279+typedef atomic64_t atomic_long_unchecked_t;
54280+#endif
54281+
54282 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54283
54284 static inline long atomic_long_read(atomic_long_t *l)
54285@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54286 return (long)atomic64_read(v);
54287 }
54288
54289+#ifdef CONFIG_PAX_REFCOUNT
54290+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54291+{
54292+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54293+
54294+ return (long)atomic64_read_unchecked(v);
54295+}
54296+#endif
54297+
54298 static inline void atomic_long_set(atomic_long_t *l, long i)
54299 {
54300 atomic64_t *v = (atomic64_t *)l;
54301@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54302 atomic64_set(v, i);
54303 }
54304
54305+#ifdef CONFIG_PAX_REFCOUNT
54306+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54307+{
54308+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54309+
54310+ atomic64_set_unchecked(v, i);
54311+}
54312+#endif
54313+
54314 static inline void atomic_long_inc(atomic_long_t *l)
54315 {
54316 atomic64_t *v = (atomic64_t *)l;
54317@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54318 atomic64_inc(v);
54319 }
54320
54321+#ifdef CONFIG_PAX_REFCOUNT
54322+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54323+{
54324+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54325+
54326+ atomic64_inc_unchecked(v);
54327+}
54328+#endif
54329+
54330 static inline void atomic_long_dec(atomic_long_t *l)
54331 {
54332 atomic64_t *v = (atomic64_t *)l;
54333@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54334 atomic64_dec(v);
54335 }
54336
54337+#ifdef CONFIG_PAX_REFCOUNT
54338+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54339+{
54340+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54341+
54342+ atomic64_dec_unchecked(v);
54343+}
54344+#endif
54345+
54346 static inline void atomic_long_add(long i, atomic_long_t *l)
54347 {
54348 atomic64_t *v = (atomic64_t *)l;
54349@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54350 atomic64_add(i, v);
54351 }
54352
54353+#ifdef CONFIG_PAX_REFCOUNT
54354+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54355+{
54356+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54357+
54358+ atomic64_add_unchecked(i, v);
54359+}
54360+#endif
54361+
54362 static inline void atomic_long_sub(long i, atomic_long_t *l)
54363 {
54364 atomic64_t *v = (atomic64_t *)l;
54365@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
54366 return (long)atomic64_inc_return(v);
54367 }
54368
54369+#ifdef CONFIG_PAX_REFCOUNT
54370+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54371+{
54372+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54373+
54374+ return (long)atomic64_inc_return_unchecked(v);
54375+}
54376+#endif
54377+
54378 static inline long atomic_long_dec_return(atomic_long_t *l)
54379 {
54380 atomic64_t *v = (atomic64_t *)l;
54381@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
54382
54383 typedef atomic_t atomic_long_t;
54384
54385+#ifdef CONFIG_PAX_REFCOUNT
54386+typedef atomic_unchecked_t atomic_long_unchecked_t;
54387+#else
54388+typedef atomic_t atomic_long_unchecked_t;
54389+#endif
54390+
54391 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54392 static inline long atomic_long_read(atomic_long_t *l)
54393 {
54394@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
54395 return (long)atomic_read(v);
54396 }
54397
54398+#ifdef CONFIG_PAX_REFCOUNT
54399+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54400+{
54401+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54402+
54403+ return (long)atomic_read_unchecked(v);
54404+}
54405+#endif
54406+
54407 static inline void atomic_long_set(atomic_long_t *l, long i)
54408 {
54409 atomic_t *v = (atomic_t *)l;
54410@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
54411 atomic_set(v, i);
54412 }
54413
54414+#ifdef CONFIG_PAX_REFCOUNT
54415+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54416+{
54417+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54418+
54419+ atomic_set_unchecked(v, i);
54420+}
54421+#endif
54422+
54423 static inline void atomic_long_inc(atomic_long_t *l)
54424 {
54425 atomic_t *v = (atomic_t *)l;
54426@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
54427 atomic_inc(v);
54428 }
54429
54430+#ifdef CONFIG_PAX_REFCOUNT
54431+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54432+{
54433+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54434+
54435+ atomic_inc_unchecked(v);
54436+}
54437+#endif
54438+
54439 static inline void atomic_long_dec(atomic_long_t *l)
54440 {
54441 atomic_t *v = (atomic_t *)l;
54442@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
54443 atomic_dec(v);
54444 }
54445
54446+#ifdef CONFIG_PAX_REFCOUNT
54447+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54448+{
54449+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54450+
54451+ atomic_dec_unchecked(v);
54452+}
54453+#endif
54454+
54455 static inline void atomic_long_add(long i, atomic_long_t *l)
54456 {
54457 atomic_t *v = (atomic_t *)l;
54458@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
54459 atomic_add(i, v);
54460 }
54461
54462+#ifdef CONFIG_PAX_REFCOUNT
54463+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54464+{
54465+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54466+
54467+ atomic_add_unchecked(i, v);
54468+}
54469+#endif
54470+
54471 static inline void atomic_long_sub(long i, atomic_long_t *l)
54472 {
54473 atomic_t *v = (atomic_t *)l;
54474@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
54475 return (long)atomic_inc_return(v);
54476 }
54477
54478+#ifdef CONFIG_PAX_REFCOUNT
54479+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54480+{
54481+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54482+
54483+ return (long)atomic_inc_return_unchecked(v);
54484+}
54485+#endif
54486+
54487 static inline long atomic_long_dec_return(atomic_long_t *l)
54488 {
54489 atomic_t *v = (atomic_t *)l;
54490@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
54491
54492 #endif /* BITS_PER_LONG == 64 */
54493
54494+#ifdef CONFIG_PAX_REFCOUNT
54495+static inline void pax_refcount_needs_these_functions(void)
54496+{
54497+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
54498+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54499+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54500+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54501+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54502+ atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54503+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54504+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54505+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54506+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54507+ atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54508+
54509+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54510+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54511+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54512+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54513+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54514+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54515+}
54516+#else
54517+#define atomic_read_unchecked(v) atomic_read(v)
54518+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54519+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54520+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54521+#define atomic_inc_unchecked(v) atomic_inc(v)
54522+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54523+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54524+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54525+#define atomic_dec_unchecked(v) atomic_dec(v)
54526+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54527+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54528+
54529+#define atomic_long_read_unchecked(v) atomic_long_read(v)
54530+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54531+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54532+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54533+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54534+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54535+#endif
54536+
54537 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
54538diff -urNp linux-2.6.32.42/include/asm-generic/cache.h linux-2.6.32.42/include/asm-generic/cache.h
54539--- linux-2.6.32.42/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
54540+++ linux-2.6.32.42/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
54541@@ -6,7 +6,7 @@
54542 * cache lines need to provide their own cache.h.
54543 */
54544
54545-#define L1_CACHE_SHIFT 5
54546-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54547+#define L1_CACHE_SHIFT 5UL
54548+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54549
54550 #endif /* __ASM_GENERIC_CACHE_H */
54551diff -urNp linux-2.6.32.42/include/asm-generic/dma-mapping-common.h linux-2.6.32.42/include/asm-generic/dma-mapping-common.h
54552--- linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
54553+++ linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
54554@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
54555 enum dma_data_direction dir,
54556 struct dma_attrs *attrs)
54557 {
54558- struct dma_map_ops *ops = get_dma_ops(dev);
54559+ const struct dma_map_ops *ops = get_dma_ops(dev);
54560 dma_addr_t addr;
54561
54562 kmemcheck_mark_initialized(ptr, size);
54563@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
54564 enum dma_data_direction dir,
54565 struct dma_attrs *attrs)
54566 {
54567- struct dma_map_ops *ops = get_dma_ops(dev);
54568+ const struct dma_map_ops *ops = get_dma_ops(dev);
54569
54570 BUG_ON(!valid_dma_direction(dir));
54571 if (ops->unmap_page)
54572@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
54573 int nents, enum dma_data_direction dir,
54574 struct dma_attrs *attrs)
54575 {
54576- struct dma_map_ops *ops = get_dma_ops(dev);
54577+ const struct dma_map_ops *ops = get_dma_ops(dev);
54578 int i, ents;
54579 struct scatterlist *s;
54580
54581@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
54582 int nents, enum dma_data_direction dir,
54583 struct dma_attrs *attrs)
54584 {
54585- struct dma_map_ops *ops = get_dma_ops(dev);
54586+ const struct dma_map_ops *ops = get_dma_ops(dev);
54587
54588 BUG_ON(!valid_dma_direction(dir));
54589 debug_dma_unmap_sg(dev, sg, nents, dir);
54590@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
54591 size_t offset, size_t size,
54592 enum dma_data_direction dir)
54593 {
54594- struct dma_map_ops *ops = get_dma_ops(dev);
54595+ const struct dma_map_ops *ops = get_dma_ops(dev);
54596 dma_addr_t addr;
54597
54598 kmemcheck_mark_initialized(page_address(page) + offset, size);
54599@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
54600 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
54601 size_t size, enum dma_data_direction dir)
54602 {
54603- struct dma_map_ops *ops = get_dma_ops(dev);
54604+ const struct dma_map_ops *ops = get_dma_ops(dev);
54605
54606 BUG_ON(!valid_dma_direction(dir));
54607 if (ops->unmap_page)
54608@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
54609 size_t size,
54610 enum dma_data_direction dir)
54611 {
54612- struct dma_map_ops *ops = get_dma_ops(dev);
54613+ const struct dma_map_ops *ops = get_dma_ops(dev);
54614
54615 BUG_ON(!valid_dma_direction(dir));
54616 if (ops->sync_single_for_cpu)
54617@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
54618 dma_addr_t addr, size_t size,
54619 enum dma_data_direction dir)
54620 {
54621- struct dma_map_ops *ops = get_dma_ops(dev);
54622+ const struct dma_map_ops *ops = get_dma_ops(dev);
54623
54624 BUG_ON(!valid_dma_direction(dir));
54625 if (ops->sync_single_for_device)
54626@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
54627 size_t size,
54628 enum dma_data_direction dir)
54629 {
54630- struct dma_map_ops *ops = get_dma_ops(dev);
54631+ const struct dma_map_ops *ops = get_dma_ops(dev);
54632
54633 BUG_ON(!valid_dma_direction(dir));
54634 if (ops->sync_single_range_for_cpu) {
54635@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
54636 size_t size,
54637 enum dma_data_direction dir)
54638 {
54639- struct dma_map_ops *ops = get_dma_ops(dev);
54640+ const struct dma_map_ops *ops = get_dma_ops(dev);
54641
54642 BUG_ON(!valid_dma_direction(dir));
54643 if (ops->sync_single_range_for_device) {
54644@@ -155,7 +155,7 @@ static inline void
54645 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
54646 int nelems, enum dma_data_direction dir)
54647 {
54648- struct dma_map_ops *ops = get_dma_ops(dev);
54649+ const struct dma_map_ops *ops = get_dma_ops(dev);
54650
54651 BUG_ON(!valid_dma_direction(dir));
54652 if (ops->sync_sg_for_cpu)
54653@@ -167,7 +167,7 @@ static inline void
54654 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
54655 int nelems, enum dma_data_direction dir)
54656 {
54657- struct dma_map_ops *ops = get_dma_ops(dev);
54658+ const struct dma_map_ops *ops = get_dma_ops(dev);
54659
54660 BUG_ON(!valid_dma_direction(dir));
54661 if (ops->sync_sg_for_device)
54662diff -urNp linux-2.6.32.42/include/asm-generic/futex.h linux-2.6.32.42/include/asm-generic/futex.h
54663--- linux-2.6.32.42/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
54664+++ linux-2.6.32.42/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
54665@@ -6,7 +6,7 @@
54666 #include <asm/errno.h>
54667
54668 static inline int
54669-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
54670+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
54671 {
54672 int op = (encoded_op >> 28) & 7;
54673 int cmp = (encoded_op >> 24) & 15;
54674@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
54675 }
54676
54677 static inline int
54678-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54679+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
54680 {
54681 return -ENOSYS;
54682 }
54683diff -urNp linux-2.6.32.42/include/asm-generic/int-l64.h linux-2.6.32.42/include/asm-generic/int-l64.h
54684--- linux-2.6.32.42/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
54685+++ linux-2.6.32.42/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
54686@@ -46,6 +46,8 @@ typedef unsigned int u32;
54687 typedef signed long s64;
54688 typedef unsigned long u64;
54689
54690+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54691+
54692 #define S8_C(x) x
54693 #define U8_C(x) x ## U
54694 #define S16_C(x) x
54695diff -urNp linux-2.6.32.42/include/asm-generic/int-ll64.h linux-2.6.32.42/include/asm-generic/int-ll64.h
54696--- linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54697+++ linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54698@@ -51,6 +51,8 @@ typedef unsigned int u32;
54699 typedef signed long long s64;
54700 typedef unsigned long long u64;
54701
54702+typedef unsigned long long intoverflow_t;
54703+
54704 #define S8_C(x) x
54705 #define U8_C(x) x ## U
54706 #define S16_C(x) x
54707diff -urNp linux-2.6.32.42/include/asm-generic/kmap_types.h linux-2.6.32.42/include/asm-generic/kmap_types.h
54708--- linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54709+++ linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54710@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54711 KMAP_D(16) KM_IRQ_PTE,
54712 KMAP_D(17) KM_NMI,
54713 KMAP_D(18) KM_NMI_PTE,
54714-KMAP_D(19) KM_TYPE_NR
54715+KMAP_D(19) KM_CLEARPAGE,
54716+KMAP_D(20) KM_TYPE_NR
54717 };
54718
54719 #undef KMAP_D
54720diff -urNp linux-2.6.32.42/include/asm-generic/pgtable.h linux-2.6.32.42/include/asm-generic/pgtable.h
54721--- linux-2.6.32.42/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54722+++ linux-2.6.32.42/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54723@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54724 unsigned long size);
54725 #endif
54726
54727+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54728+static inline unsigned long pax_open_kernel(void) { return 0; }
54729+#endif
54730+
54731+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54732+static inline unsigned long pax_close_kernel(void) { return 0; }
54733+#endif
54734+
54735 #endif /* !__ASSEMBLY__ */
54736
54737 #endif /* _ASM_GENERIC_PGTABLE_H */
54738diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h
54739--- linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54740+++ linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54741@@ -1,14 +1,19 @@
54742 #ifndef _PGTABLE_NOPMD_H
54743 #define _PGTABLE_NOPMD_H
54744
54745-#ifndef __ASSEMBLY__
54746-
54747 #include <asm-generic/pgtable-nopud.h>
54748
54749-struct mm_struct;
54750-
54751 #define __PAGETABLE_PMD_FOLDED
54752
54753+#define PMD_SHIFT PUD_SHIFT
54754+#define PTRS_PER_PMD 1
54755+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54756+#define PMD_MASK (~(PMD_SIZE-1))
54757+
54758+#ifndef __ASSEMBLY__
54759+
54760+struct mm_struct;
54761+
54762 /*
54763 * Having the pmd type consist of a pud gets the size right, and allows
54764 * us to conceptually access the pud entry that this pmd is folded into
54765@@ -16,11 +21,6 @@ struct mm_struct;
54766 */
54767 typedef struct { pud_t pud; } pmd_t;
54768
54769-#define PMD_SHIFT PUD_SHIFT
54770-#define PTRS_PER_PMD 1
54771-#define PMD_SIZE (1UL << PMD_SHIFT)
54772-#define PMD_MASK (~(PMD_SIZE-1))
54773-
54774 /*
54775 * The "pud_xxx()" functions here are trivial for a folded two-level
54776 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54777diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopud.h linux-2.6.32.42/include/asm-generic/pgtable-nopud.h
54778--- linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54779+++ linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54780@@ -1,10 +1,15 @@
54781 #ifndef _PGTABLE_NOPUD_H
54782 #define _PGTABLE_NOPUD_H
54783
54784-#ifndef __ASSEMBLY__
54785-
54786 #define __PAGETABLE_PUD_FOLDED
54787
54788+#define PUD_SHIFT PGDIR_SHIFT
54789+#define PTRS_PER_PUD 1
54790+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54791+#define PUD_MASK (~(PUD_SIZE-1))
54792+
54793+#ifndef __ASSEMBLY__
54794+
54795 /*
54796 * Having the pud type consist of a pgd gets the size right, and allows
54797 * us to conceptually access the pgd entry that this pud is folded into
54798@@ -12,11 +17,6 @@
54799 */
54800 typedef struct { pgd_t pgd; } pud_t;
54801
54802-#define PUD_SHIFT PGDIR_SHIFT
54803-#define PTRS_PER_PUD 1
54804-#define PUD_SIZE (1UL << PUD_SHIFT)
54805-#define PUD_MASK (~(PUD_SIZE-1))
54806-
54807 /*
54808 * The "pgd_xxx()" functions here are trivial for a folded two-level
54809 * setup: the pud is never bad, and a pud always exists (as it's folded
54810diff -urNp linux-2.6.32.42/include/asm-generic/vmlinux.lds.h linux-2.6.32.42/include/asm-generic/vmlinux.lds.h
54811--- linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54812+++ linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54813@@ -199,6 +199,7 @@
54814 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54815 VMLINUX_SYMBOL(__start_rodata) = .; \
54816 *(.rodata) *(.rodata.*) \
54817+ *(.data.read_only) \
54818 *(__vermagic) /* Kernel version magic */ \
54819 *(__markers_strings) /* Markers: strings */ \
54820 *(__tracepoints_strings)/* Tracepoints: strings */ \
54821@@ -656,22 +657,24 @@
54822 * section in the linker script will go there too. @phdr should have
54823 * a leading colon.
54824 *
54825- * Note that this macros defines __per_cpu_load as an absolute symbol.
54826+ * Note that this macros defines per_cpu_load as an absolute symbol.
54827 * If there is no need to put the percpu section at a predetermined
54828 * address, use PERCPU().
54829 */
54830 #define PERCPU_VADDR(vaddr, phdr) \
54831- VMLINUX_SYMBOL(__per_cpu_load) = .; \
54832- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54833+ per_cpu_load = .; \
54834+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54835 - LOAD_OFFSET) { \
54836+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54837 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54838 *(.data.percpu.first) \
54839- *(.data.percpu.page_aligned) \
54840 *(.data.percpu) \
54841+ . = ALIGN(PAGE_SIZE); \
54842+ *(.data.percpu.page_aligned) \
54843 *(.data.percpu.shared_aligned) \
54844 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54845 } phdr \
54846- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54847+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54848
54849 /**
54850 * PERCPU - define output section for percpu area, simple version
54851diff -urNp linux-2.6.32.42/include/drm/drmP.h linux-2.6.32.42/include/drm/drmP.h
54852--- linux-2.6.32.42/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54853+++ linux-2.6.32.42/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54854@@ -71,6 +71,7 @@
54855 #include <linux/workqueue.h>
54856 #include <linux/poll.h>
54857 #include <asm/pgalloc.h>
54858+#include <asm/local.h>
54859 #include "drm.h"
54860
54861 #include <linux/idr.h>
54862@@ -814,7 +815,7 @@ struct drm_driver {
54863 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54864
54865 /* Driver private ops for this object */
54866- struct vm_operations_struct *gem_vm_ops;
54867+ const struct vm_operations_struct *gem_vm_ops;
54868
54869 int major;
54870 int minor;
54871@@ -917,7 +918,7 @@ struct drm_device {
54872
54873 /** \name Usage Counters */
54874 /*@{ */
54875- int open_count; /**< Outstanding files open */
54876+ local_t open_count; /**< Outstanding files open */
54877 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54878 atomic_t vma_count; /**< Outstanding vma areas open */
54879 int buf_use; /**< Buffers in use -- cannot alloc */
54880@@ -928,7 +929,7 @@ struct drm_device {
54881 /*@{ */
54882 unsigned long counters;
54883 enum drm_stat_type types[15];
54884- atomic_t counts[15];
54885+ atomic_unchecked_t counts[15];
54886 /*@} */
54887
54888 struct list_head filelist;
54889@@ -1016,7 +1017,7 @@ struct drm_device {
54890 struct pci_controller *hose;
54891 #endif
54892 struct drm_sg_mem *sg; /**< Scatter gather memory */
54893- unsigned int num_crtcs; /**< Number of CRTCs on this device */
54894+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
54895 void *dev_private; /**< device private data */
54896 void *mm_private;
54897 struct address_space *dev_mapping;
54898@@ -1042,11 +1043,11 @@ struct drm_device {
54899 spinlock_t object_name_lock;
54900 struct idr object_name_idr;
54901 atomic_t object_count;
54902- atomic_t object_memory;
54903+ atomic_unchecked_t object_memory;
54904 atomic_t pin_count;
54905- atomic_t pin_memory;
54906+ atomic_unchecked_t pin_memory;
54907 atomic_t gtt_count;
54908- atomic_t gtt_memory;
54909+ atomic_unchecked_t gtt_memory;
54910 uint32_t gtt_total;
54911 uint32_t invalidate_domains; /* domains pending invalidation */
54912 uint32_t flush_domains; /* domains pending flush */
54913diff -urNp linux-2.6.32.42/include/linux/a.out.h linux-2.6.32.42/include/linux/a.out.h
54914--- linux-2.6.32.42/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54915+++ linux-2.6.32.42/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54916@@ -39,6 +39,14 @@ enum machine_type {
54917 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54918 };
54919
54920+/* Constants for the N_FLAGS field */
54921+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54922+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54923+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54924+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54925+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54926+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54927+
54928 #if !defined (N_MAGIC)
54929 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54930 #endif
54931diff -urNp linux-2.6.32.42/include/linux/atmdev.h linux-2.6.32.42/include/linux/atmdev.h
54932--- linux-2.6.32.42/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54933+++ linux-2.6.32.42/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54934@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54935 #endif
54936
54937 struct k_atm_aal_stats {
54938-#define __HANDLE_ITEM(i) atomic_t i
54939+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54940 __AAL_STAT_ITEMS
54941 #undef __HANDLE_ITEM
54942 };
54943diff -urNp linux-2.6.32.42/include/linux/backlight.h linux-2.6.32.42/include/linux/backlight.h
54944--- linux-2.6.32.42/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54945+++ linux-2.6.32.42/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54946@@ -36,18 +36,18 @@ struct backlight_device;
54947 struct fb_info;
54948
54949 struct backlight_ops {
54950- unsigned int options;
54951+ const unsigned int options;
54952
54953 #define BL_CORE_SUSPENDRESUME (1 << 0)
54954
54955 /* Notify the backlight driver some property has changed */
54956- int (*update_status)(struct backlight_device *);
54957+ int (* const update_status)(struct backlight_device *);
54958 /* Return the current backlight brightness (accounting for power,
54959 fb_blank etc.) */
54960- int (*get_brightness)(struct backlight_device *);
54961+ int (* const get_brightness)(struct backlight_device *);
54962 /* Check if given framebuffer device is the one bound to this backlight;
54963 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54964- int (*check_fb)(struct fb_info *);
54965+ int (* const check_fb)(struct fb_info *);
54966 };
54967
54968 /* This structure defines all the properties of a backlight */
54969@@ -86,7 +86,7 @@ struct backlight_device {
54970 registered this device has been unloaded, and if class_get_devdata()
54971 points to something in the body of that driver, it is also invalid. */
54972 struct mutex ops_lock;
54973- struct backlight_ops *ops;
54974+ const struct backlight_ops *ops;
54975
54976 /* The framebuffer notifier block */
54977 struct notifier_block fb_notif;
54978@@ -103,7 +103,7 @@ static inline void backlight_update_stat
54979 }
54980
54981 extern struct backlight_device *backlight_device_register(const char *name,
54982- struct device *dev, void *devdata, struct backlight_ops *ops);
54983+ struct device *dev, void *devdata, const struct backlight_ops *ops);
54984 extern void backlight_device_unregister(struct backlight_device *bd);
54985 extern void backlight_force_update(struct backlight_device *bd,
54986 enum backlight_update_reason reason);
54987diff -urNp linux-2.6.32.42/include/linux/binfmts.h linux-2.6.32.42/include/linux/binfmts.h
54988--- linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54989+++ linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54990@@ -83,6 +83,7 @@ struct linux_binfmt {
54991 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54992 int (*load_shlib)(struct file *);
54993 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54994+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54995 unsigned long min_coredump; /* minimal dump size */
54996 int hasvdso;
54997 };
54998diff -urNp linux-2.6.32.42/include/linux/blkdev.h linux-2.6.32.42/include/linux/blkdev.h
54999--- linux-2.6.32.42/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
55000+++ linux-2.6.32.42/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
55001@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
55002 #endif /* CONFIG_BLK_DEV_INTEGRITY */
55003
55004 struct block_device_operations {
55005- int (*open) (struct block_device *, fmode_t);
55006- int (*release) (struct gendisk *, fmode_t);
55007- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55008- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55009- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55010- int (*direct_access) (struct block_device *, sector_t,
55011+ int (* const open) (struct block_device *, fmode_t);
55012+ int (* const release) (struct gendisk *, fmode_t);
55013+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55014+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55015+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55016+ int (* const direct_access) (struct block_device *, sector_t,
55017 void **, unsigned long *);
55018- int (*media_changed) (struct gendisk *);
55019- unsigned long long (*set_capacity) (struct gendisk *,
55020+ int (* const media_changed) (struct gendisk *);
55021+ unsigned long long (* const set_capacity) (struct gendisk *,
55022 unsigned long long);
55023- int (*revalidate_disk) (struct gendisk *);
55024- int (*getgeo)(struct block_device *, struct hd_geometry *);
55025- struct module *owner;
55026+ int (* const revalidate_disk) (struct gendisk *);
55027+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
55028+ struct module * const owner;
55029 };
55030
55031 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
55032diff -urNp linux-2.6.32.42/include/linux/blktrace_api.h linux-2.6.32.42/include/linux/blktrace_api.h
55033--- linux-2.6.32.42/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
55034+++ linux-2.6.32.42/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
55035@@ -160,7 +160,7 @@ struct blk_trace {
55036 struct dentry *dir;
55037 struct dentry *dropped_file;
55038 struct dentry *msg_file;
55039- atomic_t dropped;
55040+ atomic_unchecked_t dropped;
55041 };
55042
55043 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
55044diff -urNp linux-2.6.32.42/include/linux/byteorder/little_endian.h linux-2.6.32.42/include/linux/byteorder/little_endian.h
55045--- linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
55046+++ linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
55047@@ -42,51 +42,51 @@
55048
55049 static inline __le64 __cpu_to_le64p(const __u64 *p)
55050 {
55051- return (__force __le64)*p;
55052+ return (__force const __le64)*p;
55053 }
55054 static inline __u64 __le64_to_cpup(const __le64 *p)
55055 {
55056- return (__force __u64)*p;
55057+ return (__force const __u64)*p;
55058 }
55059 static inline __le32 __cpu_to_le32p(const __u32 *p)
55060 {
55061- return (__force __le32)*p;
55062+ return (__force const __le32)*p;
55063 }
55064 static inline __u32 __le32_to_cpup(const __le32 *p)
55065 {
55066- return (__force __u32)*p;
55067+ return (__force const __u32)*p;
55068 }
55069 static inline __le16 __cpu_to_le16p(const __u16 *p)
55070 {
55071- return (__force __le16)*p;
55072+ return (__force const __le16)*p;
55073 }
55074 static inline __u16 __le16_to_cpup(const __le16 *p)
55075 {
55076- return (__force __u16)*p;
55077+ return (__force const __u16)*p;
55078 }
55079 static inline __be64 __cpu_to_be64p(const __u64 *p)
55080 {
55081- return (__force __be64)__swab64p(p);
55082+ return (__force const __be64)__swab64p(p);
55083 }
55084 static inline __u64 __be64_to_cpup(const __be64 *p)
55085 {
55086- return __swab64p((__u64 *)p);
55087+ return __swab64p((const __u64 *)p);
55088 }
55089 static inline __be32 __cpu_to_be32p(const __u32 *p)
55090 {
55091- return (__force __be32)__swab32p(p);
55092+ return (__force const __be32)__swab32p(p);
55093 }
55094 static inline __u32 __be32_to_cpup(const __be32 *p)
55095 {
55096- return __swab32p((__u32 *)p);
55097+ return __swab32p((const __u32 *)p);
55098 }
55099 static inline __be16 __cpu_to_be16p(const __u16 *p)
55100 {
55101- return (__force __be16)__swab16p(p);
55102+ return (__force const __be16)__swab16p(p);
55103 }
55104 static inline __u16 __be16_to_cpup(const __be16 *p)
55105 {
55106- return __swab16p((__u16 *)p);
55107+ return __swab16p((const __u16 *)p);
55108 }
55109 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
55110 #define __le64_to_cpus(x) do { (void)(x); } while (0)
55111diff -urNp linux-2.6.32.42/include/linux/cache.h linux-2.6.32.42/include/linux/cache.h
55112--- linux-2.6.32.42/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
55113+++ linux-2.6.32.42/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
55114@@ -16,6 +16,10 @@
55115 #define __read_mostly
55116 #endif
55117
55118+#ifndef __read_only
55119+#define __read_only __read_mostly
55120+#endif
55121+
55122 #ifndef ____cacheline_aligned
55123 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
55124 #endif
55125diff -urNp linux-2.6.32.42/include/linux/capability.h linux-2.6.32.42/include/linux/capability.h
55126--- linux-2.6.32.42/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
55127+++ linux-2.6.32.42/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
55128@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
55129 (security_real_capable_noaudit((t), (cap)) == 0)
55130
55131 extern int capable(int cap);
55132+int capable_nolog(int cap);
55133
55134 /* audit system wants to get cap info from files as well */
55135 struct dentry;
55136diff -urNp linux-2.6.32.42/include/linux/compiler-gcc4.h linux-2.6.32.42/include/linux/compiler-gcc4.h
55137--- linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
55138+++ linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
55139@@ -36,4 +36,8 @@
55140 the kernel context */
55141 #define __cold __attribute__((__cold__))
55142
55143+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
55144+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55145+#define __bos0(ptr) __bos((ptr), 0)
55146+#define __bos1(ptr) __bos((ptr), 1)
55147 #endif
55148diff -urNp linux-2.6.32.42/include/linux/compiler.h linux-2.6.32.42/include/linux/compiler.h
55149--- linux-2.6.32.42/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
55150+++ linux-2.6.32.42/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
55151@@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
55152 #define __cold
55153 #endif
55154
55155+#ifndef __alloc_size
55156+#define __alloc_size
55157+#endif
55158+
55159+#ifndef __bos
55160+#define __bos
55161+#endif
55162+
55163+#ifndef __bos0
55164+#define __bos0
55165+#endif
55166+
55167+#ifndef __bos1
55168+#define __bos1
55169+#endif
55170+
55171 /* Simple shorthand for a section definition */
55172 #ifndef __section
55173 # define __section(S) __attribute__ ((__section__(#S)))
55174@@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
55175 * use is to mediate communication between process-level code and irq/NMI
55176 * handlers, all running on the same CPU.
55177 */
55178-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55179+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55180+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55181
55182 #endif /* __LINUX_COMPILER_H */
55183diff -urNp linux-2.6.32.42/include/linux/dcache.h linux-2.6.32.42/include/linux/dcache.h
55184--- linux-2.6.32.42/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
55185+++ linux-2.6.32.42/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
55186@@ -119,6 +119,8 @@ struct dentry {
55187 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
55188 };
55189
55190+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
55191+
55192 /*
55193 * dentry->d_lock spinlock nesting subclasses:
55194 *
55195diff -urNp linux-2.6.32.42/include/linux/decompress/mm.h linux-2.6.32.42/include/linux/decompress/mm.h
55196--- linux-2.6.32.42/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
55197+++ linux-2.6.32.42/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
55198@@ -78,7 +78,7 @@ static void free(void *where)
55199 * warnings when not needed (indeed large_malloc / large_free are not
55200 * needed by inflate */
55201
55202-#define malloc(a) kmalloc(a, GFP_KERNEL)
55203+#define malloc(a) kmalloc((a), GFP_KERNEL)
55204 #define free(a) kfree(a)
55205
55206 #define large_malloc(a) vmalloc(a)
55207diff -urNp linux-2.6.32.42/include/linux/dma-mapping.h linux-2.6.32.42/include/linux/dma-mapping.h
55208--- linux-2.6.32.42/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
55209+++ linux-2.6.32.42/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
55210@@ -16,50 +16,50 @@ enum dma_data_direction {
55211 };
55212
55213 struct dma_map_ops {
55214- void* (*alloc_coherent)(struct device *dev, size_t size,
55215+ void* (* const alloc_coherent)(struct device *dev, size_t size,
55216 dma_addr_t *dma_handle, gfp_t gfp);
55217- void (*free_coherent)(struct device *dev, size_t size,
55218+ void (* const free_coherent)(struct device *dev, size_t size,
55219 void *vaddr, dma_addr_t dma_handle);
55220- dma_addr_t (*map_page)(struct device *dev, struct page *page,
55221+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
55222 unsigned long offset, size_t size,
55223 enum dma_data_direction dir,
55224 struct dma_attrs *attrs);
55225- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
55226+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
55227 size_t size, enum dma_data_direction dir,
55228 struct dma_attrs *attrs);
55229- int (*map_sg)(struct device *dev, struct scatterlist *sg,
55230+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
55231 int nents, enum dma_data_direction dir,
55232 struct dma_attrs *attrs);
55233- void (*unmap_sg)(struct device *dev,
55234+ void (* const unmap_sg)(struct device *dev,
55235 struct scatterlist *sg, int nents,
55236 enum dma_data_direction dir,
55237 struct dma_attrs *attrs);
55238- void (*sync_single_for_cpu)(struct device *dev,
55239+ void (* const sync_single_for_cpu)(struct device *dev,
55240 dma_addr_t dma_handle, size_t size,
55241 enum dma_data_direction dir);
55242- void (*sync_single_for_device)(struct device *dev,
55243+ void (* const sync_single_for_device)(struct device *dev,
55244 dma_addr_t dma_handle, size_t size,
55245 enum dma_data_direction dir);
55246- void (*sync_single_range_for_cpu)(struct device *dev,
55247+ void (* const sync_single_range_for_cpu)(struct device *dev,
55248 dma_addr_t dma_handle,
55249 unsigned long offset,
55250 size_t size,
55251 enum dma_data_direction dir);
55252- void (*sync_single_range_for_device)(struct device *dev,
55253+ void (* const sync_single_range_for_device)(struct device *dev,
55254 dma_addr_t dma_handle,
55255 unsigned long offset,
55256 size_t size,
55257 enum dma_data_direction dir);
55258- void (*sync_sg_for_cpu)(struct device *dev,
55259+ void (* const sync_sg_for_cpu)(struct device *dev,
55260 struct scatterlist *sg, int nents,
55261 enum dma_data_direction dir);
55262- void (*sync_sg_for_device)(struct device *dev,
55263+ void (* const sync_sg_for_device)(struct device *dev,
55264 struct scatterlist *sg, int nents,
55265 enum dma_data_direction dir);
55266- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
55267- int (*dma_supported)(struct device *dev, u64 mask);
55268+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
55269+ int (* const dma_supported)(struct device *dev, u64 mask);
55270 int (*set_dma_mask)(struct device *dev, u64 mask);
55271- int is_phys;
55272+ const int is_phys;
55273 };
55274
55275 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55276diff -urNp linux-2.6.32.42/include/linux/dst.h linux-2.6.32.42/include/linux/dst.h
55277--- linux-2.6.32.42/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
55278+++ linux-2.6.32.42/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
55279@@ -380,7 +380,7 @@ struct dst_node
55280 struct thread_pool *pool;
55281
55282 /* Transaction IDs live here */
55283- atomic_long_t gen;
55284+ atomic_long_unchecked_t gen;
55285
55286 /*
55287 * How frequently and how many times transaction
55288diff -urNp linux-2.6.32.42/include/linux/elf.h linux-2.6.32.42/include/linux/elf.h
55289--- linux-2.6.32.42/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
55290+++ linux-2.6.32.42/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
55291@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55292 #define PT_GNU_EH_FRAME 0x6474e550
55293
55294 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55295+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55296+
55297+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55298+
55299+/* Constants for the e_flags field */
55300+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55301+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55302+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55303+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55304+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55305+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55306
55307 /* These constants define the different elf file types */
55308 #define ET_NONE 0
55309@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
55310 #define DT_DEBUG 21
55311 #define DT_TEXTREL 22
55312 #define DT_JMPREL 23
55313+#define DT_FLAGS 30
55314+ #define DF_TEXTREL 0x00000004
55315 #define DT_ENCODING 32
55316 #define OLD_DT_LOOS 0x60000000
55317 #define DT_LOOS 0x6000000d
55318@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
55319 #define PF_W 0x2
55320 #define PF_X 0x1
55321
55322+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55323+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55324+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55325+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55326+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55327+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55328+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55329+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55330+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55331+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55332+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55333+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55334+
55335 typedef struct elf32_phdr{
55336 Elf32_Word p_type;
55337 Elf32_Off p_offset;
55338@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
55339 #define EI_OSABI 7
55340 #define EI_PAD 8
55341
55342+#define EI_PAX 14
55343+
55344 #define ELFMAG0 0x7f /* EI_MAG */
55345 #define ELFMAG1 'E'
55346 #define ELFMAG2 'L'
55347@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
55348 #define elf_phdr elf32_phdr
55349 #define elf_note elf32_note
55350 #define elf_addr_t Elf32_Off
55351+#define elf_dyn Elf32_Dyn
55352
55353 #else
55354
55355@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
55356 #define elf_phdr elf64_phdr
55357 #define elf_note elf64_note
55358 #define elf_addr_t Elf64_Off
55359+#define elf_dyn Elf64_Dyn
55360
55361 #endif
55362
55363diff -urNp linux-2.6.32.42/include/linux/fscache-cache.h linux-2.6.32.42/include/linux/fscache-cache.h
55364--- linux-2.6.32.42/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
55365+++ linux-2.6.32.42/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
55366@@ -116,7 +116,7 @@ struct fscache_operation {
55367 #endif
55368 };
55369
55370-extern atomic_t fscache_op_debug_id;
55371+extern atomic_unchecked_t fscache_op_debug_id;
55372 extern const struct slow_work_ops fscache_op_slow_work_ops;
55373
55374 extern void fscache_enqueue_operation(struct fscache_operation *);
55375@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
55376 fscache_operation_release_t release)
55377 {
55378 atomic_set(&op->usage, 1);
55379- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55380+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55381 op->release = release;
55382 INIT_LIST_HEAD(&op->pend_link);
55383 fscache_set_op_state(op, "Init");
55384diff -urNp linux-2.6.32.42/include/linux/fs.h linux-2.6.32.42/include/linux/fs.h
55385--- linux-2.6.32.42/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
55386+++ linux-2.6.32.42/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
55387@@ -90,6 +90,11 @@ struct inodes_stat_t {
55388 /* Expect random access pattern */
55389 #define FMODE_RANDOM ((__force fmode_t)4096)
55390
55391+/* Hack for grsec so as not to require read permission simply to execute
55392+ * a binary
55393+ */
55394+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55395+
55396 /*
55397 * The below are the various read and write types that we support. Some of
55398 * them include behavioral modifiers that send information down to the
55399@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
55400 unsigned long, unsigned long);
55401
55402 struct address_space_operations {
55403- int (*writepage)(struct page *page, struct writeback_control *wbc);
55404- int (*readpage)(struct file *, struct page *);
55405- void (*sync_page)(struct page *);
55406+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
55407+ int (* const readpage)(struct file *, struct page *);
55408+ void (* const sync_page)(struct page *);
55409
55410 /* Write back some dirty pages from this mapping. */
55411- int (*writepages)(struct address_space *, struct writeback_control *);
55412+ int (* const writepages)(struct address_space *, struct writeback_control *);
55413
55414 /* Set a page dirty. Return true if this dirtied it */
55415- int (*set_page_dirty)(struct page *page);
55416+ int (* const set_page_dirty)(struct page *page);
55417
55418- int (*readpages)(struct file *filp, struct address_space *mapping,
55419+ int (* const readpages)(struct file *filp, struct address_space *mapping,
55420 struct list_head *pages, unsigned nr_pages);
55421
55422- int (*write_begin)(struct file *, struct address_space *mapping,
55423+ int (* const write_begin)(struct file *, struct address_space *mapping,
55424 loff_t pos, unsigned len, unsigned flags,
55425 struct page **pagep, void **fsdata);
55426- int (*write_end)(struct file *, struct address_space *mapping,
55427+ int (* const write_end)(struct file *, struct address_space *mapping,
55428 loff_t pos, unsigned len, unsigned copied,
55429 struct page *page, void *fsdata);
55430
55431 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
55432- sector_t (*bmap)(struct address_space *, sector_t);
55433- void (*invalidatepage) (struct page *, unsigned long);
55434- int (*releasepage) (struct page *, gfp_t);
55435- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
55436+ sector_t (* const bmap)(struct address_space *, sector_t);
55437+ void (* const invalidatepage) (struct page *, unsigned long);
55438+ int (* const releasepage) (struct page *, gfp_t);
55439+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
55440 loff_t offset, unsigned long nr_segs);
55441- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
55442+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
55443 void **, unsigned long *);
55444 /* migrate the contents of a page to the specified target */
55445- int (*migratepage) (struct address_space *,
55446+ int (* const migratepage) (struct address_space *,
55447 struct page *, struct page *);
55448- int (*launder_page) (struct page *);
55449- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
55450+ int (* const launder_page) (struct page *);
55451+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
55452 unsigned long);
55453- int (*error_remove_page)(struct address_space *, struct page *);
55454+ int (* const error_remove_page)(struct address_space *, struct page *);
55455 };
55456
55457 /*
55458@@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
55459 typedef struct files_struct *fl_owner_t;
55460
55461 struct file_lock_operations {
55462- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55463- void (*fl_release_private)(struct file_lock *);
55464+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55465+ void (* const fl_release_private)(struct file_lock *);
55466 };
55467
55468 struct lock_manager_operations {
55469- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
55470- void (*fl_notify)(struct file_lock *); /* unblock callback */
55471- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
55472- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55473- void (*fl_release_private)(struct file_lock *);
55474- void (*fl_break)(struct file_lock *);
55475- int (*fl_mylease)(struct file_lock *, struct file_lock *);
55476- int (*fl_change)(struct file_lock **, int);
55477+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
55478+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
55479+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
55480+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55481+ void (* const fl_release_private)(struct file_lock *);
55482+ void (* const fl_break)(struct file_lock *);
55483+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
55484+ int (* const fl_change)(struct file_lock **, int);
55485 };
55486
55487 struct lock_manager {
55488@@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
55489 unsigned int fi_flags; /* Flags as passed from user */
55490 unsigned int fi_extents_mapped; /* Number of mapped extents */
55491 unsigned int fi_extents_max; /* Size of fiemap_extent array */
55492- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
55493+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
55494 * array */
55495 };
55496 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
55497@@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
55498 unsigned long, loff_t *);
55499
55500 struct super_operations {
55501- struct inode *(*alloc_inode)(struct super_block *sb);
55502- void (*destroy_inode)(struct inode *);
55503+ struct inode *(* const alloc_inode)(struct super_block *sb);
55504+ void (* const destroy_inode)(struct inode *);
55505
55506- void (*dirty_inode) (struct inode *);
55507- int (*write_inode) (struct inode *, int);
55508- void (*drop_inode) (struct inode *);
55509- void (*delete_inode) (struct inode *);
55510- void (*put_super) (struct super_block *);
55511- void (*write_super) (struct super_block *);
55512- int (*sync_fs)(struct super_block *sb, int wait);
55513- int (*freeze_fs) (struct super_block *);
55514- int (*unfreeze_fs) (struct super_block *);
55515- int (*statfs) (struct dentry *, struct kstatfs *);
55516- int (*remount_fs) (struct super_block *, int *, char *);
55517- void (*clear_inode) (struct inode *);
55518- void (*umount_begin) (struct super_block *);
55519+ void (* const dirty_inode) (struct inode *);
55520+ int (* const write_inode) (struct inode *, int);
55521+ void (* const drop_inode) (struct inode *);
55522+ void (* const delete_inode) (struct inode *);
55523+ void (* const put_super) (struct super_block *);
55524+ void (* const write_super) (struct super_block *);
55525+ int (* const sync_fs)(struct super_block *sb, int wait);
55526+ int (* const freeze_fs) (struct super_block *);
55527+ int (* const unfreeze_fs) (struct super_block *);
55528+ int (* const statfs) (struct dentry *, struct kstatfs *);
55529+ int (* const remount_fs) (struct super_block *, int *, char *);
55530+ void (* const clear_inode) (struct inode *);
55531+ void (* const umount_begin) (struct super_block *);
55532
55533- int (*show_options)(struct seq_file *, struct vfsmount *);
55534- int (*show_stats)(struct seq_file *, struct vfsmount *);
55535+ int (* const show_options)(struct seq_file *, struct vfsmount *);
55536+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
55537 #ifdef CONFIG_QUOTA
55538- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
55539- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55540+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
55541+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55542 #endif
55543- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55544+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55545 };
55546
55547 /*
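The fs.h hunks above convert every method pointer in address_space_operations, file_lock_operations, lock_manager_operations and super_operations into a const-qualified member, so an ops table cannot have its function pointers retargeted once the instance is initialised. A small standalone sketch of the pattern (not from the patch; struct and function names are invented):

#include <stdio.h>

struct demo_ops {
	int (* const readpage)(int);	/* const-qualified member, as in the hunks above */
};

static int demo_readpage(int page)
{
	return page * 2;
}

static const struct demo_ops ops = {
	.readpage = demo_readpage,
};

int main(void)
{
	/* ops.readpage = NULL; would not compile: assignment of read-only member */
	printf("%d\n", ops.readpage(21));
	return 0;
}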
55548diff -urNp linux-2.6.32.42/include/linux/fs_struct.h linux-2.6.32.42/include/linux/fs_struct.h
55549--- linux-2.6.32.42/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
55550+++ linux-2.6.32.42/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
55551@@ -4,7 +4,7 @@
55552 #include <linux/path.h>
55553
55554 struct fs_struct {
55555- int users;
55556+ atomic_t users;
55557 rwlock_t lock;
55558 int umask;
55559 int in_exec;
55560diff -urNp linux-2.6.32.42/include/linux/ftrace_event.h linux-2.6.32.42/include/linux/ftrace_event.h
55561--- linux-2.6.32.42/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
55562+++ linux-2.6.32.42/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
55563@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
55564 int filter_type);
55565 extern int trace_define_common_fields(struct ftrace_event_call *call);
55566
55567-#define is_signed_type(type) (((type)(-1)) < 0)
55568+#define is_signed_type(type) (((type)(-1)) < (type)1)
55569
55570 int trace_set_clr_event(const char *system, const char *event, int set);
55571
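The is_signed_type() change above compares (type)(-1) against (type)1 instead of a bare 0; the truth value is unchanged, but the unsigned case no longer looks like the "comparison of unsigned expression < 0 is always false" pattern that some compilers warn about. A quick standalone check (illustrative only):

#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	printf("int:          %d\n", is_signed_type(int));		/* prints 1 */
	printf("unsigned int: %d\n", is_signed_type(unsigned int));	/* prints 0 */
	return 0;
}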
55572diff -urNp linux-2.6.32.42/include/linux/genhd.h linux-2.6.32.42/include/linux/genhd.h
55573--- linux-2.6.32.42/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
55574+++ linux-2.6.32.42/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
55575@@ -161,7 +161,7 @@ struct gendisk {
55576
55577 struct timer_rand_state *random;
55578
55579- atomic_t sync_io; /* RAID */
55580+ atomic_unchecked_t sync_io; /* RAID */
55581 struct work_struct async_notify;
55582 #ifdef CONFIG_BLK_DEV_INTEGRITY
55583 struct blk_integrity *integrity;
55584diff -urNp linux-2.6.32.42/include/linux/gracl.h linux-2.6.32.42/include/linux/gracl.h
55585--- linux-2.6.32.42/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55586+++ linux-2.6.32.42/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
55587@@ -0,0 +1,317 @@
55588+#ifndef GR_ACL_H
55589+#define GR_ACL_H
55590+
55591+#include <linux/grdefs.h>
55592+#include <linux/resource.h>
55593+#include <linux/capability.h>
55594+#include <linux/dcache.h>
55595+#include <asm/resource.h>
55596+
55597+/* Major status information */
55598+
55599+#define GR_VERSION "grsecurity 2.2.2"
55600+#define GRSECURITY_VERSION 0x2202
55601+
55602+enum {
55603+ GR_SHUTDOWN = 0,
55604+ GR_ENABLE = 1,
55605+ GR_SPROLE = 2,
55606+ GR_RELOAD = 3,
55607+ GR_SEGVMOD = 4,
55608+ GR_STATUS = 5,
55609+ GR_UNSPROLE = 6,
55610+ GR_PASSSET = 7,
55611+ GR_SPROLEPAM = 8,
55612+};
55613+
55614+/* Password setup definitions
55615+ * kernel/grhash.c */
55616+enum {
55617+ GR_PW_LEN = 128,
55618+ GR_SALT_LEN = 16,
55619+ GR_SHA_LEN = 32,
55620+};
55621+
55622+enum {
55623+ GR_SPROLE_LEN = 64,
55624+};
55625+
55626+enum {
55627+ GR_NO_GLOB = 0,
55628+ GR_REG_GLOB,
55629+ GR_CREATE_GLOB
55630+};
55631+
55632+#define GR_NLIMITS 32
55633+
55634+/* Begin Data Structures */
55635+
55636+struct sprole_pw {
55637+ unsigned char *rolename;
55638+ unsigned char salt[GR_SALT_LEN];
55639+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55640+};
55641+
55642+struct name_entry {
55643+ __u32 key;
55644+ ino_t inode;
55645+ dev_t device;
55646+ char *name;
55647+ __u16 len;
55648+ __u8 deleted;
55649+ struct name_entry *prev;
55650+ struct name_entry *next;
55651+};
55652+
55653+struct inodev_entry {
55654+ struct name_entry *nentry;
55655+ struct inodev_entry *prev;
55656+ struct inodev_entry *next;
55657+};
55658+
55659+struct acl_role_db {
55660+ struct acl_role_label **r_hash;
55661+ __u32 r_size;
55662+};
55663+
55664+struct inodev_db {
55665+ struct inodev_entry **i_hash;
55666+ __u32 i_size;
55667+};
55668+
55669+struct name_db {
55670+ struct name_entry **n_hash;
55671+ __u32 n_size;
55672+};
55673+
55674+struct crash_uid {
55675+ uid_t uid;
55676+ unsigned long expires;
55677+};
55678+
55679+struct gr_hash_struct {
55680+ void **table;
55681+ void **nametable;
55682+ void *first;
55683+ __u32 table_size;
55684+ __u32 used_size;
55685+ int type;
55686+};
55687+
55688+/* Userspace Grsecurity ACL data structures */
55689+
55690+struct acl_subject_label {
55691+ char *filename;
55692+ ino_t inode;
55693+ dev_t device;
55694+ __u32 mode;
55695+ kernel_cap_t cap_mask;
55696+ kernel_cap_t cap_lower;
55697+ kernel_cap_t cap_invert_audit;
55698+
55699+ struct rlimit res[GR_NLIMITS];
55700+ __u32 resmask;
55701+
55702+ __u8 user_trans_type;
55703+ __u8 group_trans_type;
55704+ uid_t *user_transitions;
55705+ gid_t *group_transitions;
55706+ __u16 user_trans_num;
55707+ __u16 group_trans_num;
55708+
55709+ __u32 sock_families[2];
55710+ __u32 ip_proto[8];
55711+ __u32 ip_type;
55712+ struct acl_ip_label **ips;
55713+ __u32 ip_num;
55714+ __u32 inaddr_any_override;
55715+
55716+ __u32 crashes;
55717+ unsigned long expires;
55718+
55719+ struct acl_subject_label *parent_subject;
55720+ struct gr_hash_struct *hash;
55721+ struct acl_subject_label *prev;
55722+ struct acl_subject_label *next;
55723+
55724+ struct acl_object_label **obj_hash;
55725+ __u32 obj_hash_size;
55726+ __u16 pax_flags;
55727+};
55728+
55729+struct role_allowed_ip {
55730+ __u32 addr;
55731+ __u32 netmask;
55732+
55733+ struct role_allowed_ip *prev;
55734+ struct role_allowed_ip *next;
55735+};
55736+
55737+struct role_transition {
55738+ char *rolename;
55739+
55740+ struct role_transition *prev;
55741+ struct role_transition *next;
55742+};
55743+
55744+struct acl_role_label {
55745+ char *rolename;
55746+ uid_t uidgid;
55747+ __u16 roletype;
55748+
55749+ __u16 auth_attempts;
55750+ unsigned long expires;
55751+
55752+ struct acl_subject_label *root_label;
55753+ struct gr_hash_struct *hash;
55754+
55755+ struct acl_role_label *prev;
55756+ struct acl_role_label *next;
55757+
55758+ struct role_transition *transitions;
55759+ struct role_allowed_ip *allowed_ips;
55760+ uid_t *domain_children;
55761+ __u16 domain_child_num;
55762+
55763+ struct acl_subject_label **subj_hash;
55764+ __u32 subj_hash_size;
55765+};
55766+
55767+struct user_acl_role_db {
55768+ struct acl_role_label **r_table;
55769+ __u32 num_pointers; /* Number of allocations to track */
55770+ __u32 num_roles; /* Number of roles */
55771+ __u32 num_domain_children; /* Number of domain children */
55772+ __u32 num_subjects; /* Number of subjects */
55773+ __u32 num_objects; /* Number of objects */
55774+};
55775+
55776+struct acl_object_label {
55777+ char *filename;
55778+ ino_t inode;
55779+ dev_t device;
55780+ __u32 mode;
55781+
55782+ struct acl_subject_label *nested;
55783+ struct acl_object_label *globbed;
55784+
55785+ /* next two structures not used */
55786+
55787+ struct acl_object_label *prev;
55788+ struct acl_object_label *next;
55789+};
55790+
55791+struct acl_ip_label {
55792+ char *iface;
55793+ __u32 addr;
55794+ __u32 netmask;
55795+ __u16 low, high;
55796+ __u8 mode;
55797+ __u32 type;
55798+ __u32 proto[8];
55799+
55800+ /* next two structures not used */
55801+
55802+ struct acl_ip_label *prev;
55803+ struct acl_ip_label *next;
55804+};
55805+
55806+struct gr_arg {
55807+ struct user_acl_role_db role_db;
55808+ unsigned char pw[GR_PW_LEN];
55809+ unsigned char salt[GR_SALT_LEN];
55810+ unsigned char sum[GR_SHA_LEN];
55811+ unsigned char sp_role[GR_SPROLE_LEN];
55812+ struct sprole_pw *sprole_pws;
55813+ dev_t segv_device;
55814+ ino_t segv_inode;
55815+ uid_t segv_uid;
55816+ __u16 num_sprole_pws;
55817+ __u16 mode;
55818+};
55819+
55820+struct gr_arg_wrapper {
55821+ struct gr_arg *arg;
55822+ __u32 version;
55823+ __u32 size;
55824+};
55825+
55826+struct subject_map {
55827+ struct acl_subject_label *user;
55828+ struct acl_subject_label *kernel;
55829+ struct subject_map *prev;
55830+ struct subject_map *next;
55831+};
55832+
55833+struct acl_subj_map_db {
55834+ struct subject_map **s_hash;
55835+ __u32 s_size;
55836+};
55837+
55838+/* End Data Structures Section */
55839+
55840+/* Hash functions generated by empirical testing by Brad Spengler.
55841+ Makes good use of the low bits of the inode. Generally 0-1 loop
55842+ iterations for a successful match, 0-3 for an unsuccessful match.
55843+ Shift/add algorithm with modulus of table size and an XOR */
55844+
55845+static __inline__ unsigned int
55846+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55847+{
55848+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
55849+}
55850+
55851+static __inline__ unsigned int
55852+shash(const struct acl_subject_label *userp, const unsigned int sz)
55853+{
55854+ return ((const unsigned long)userp % sz);
55855+}
55856+
55857+static __inline__ unsigned int
55858+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55859+{
55860+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55861+}
55862+
55863+static __inline__ unsigned int
55864+nhash(const char *name, const __u16 len, const unsigned int sz)
55865+{
55866+ return full_name_hash((const unsigned char *)name, len) % sz;
55867+}
55868+
55869+#define FOR_EACH_ROLE_START(role) \
55870+ role = role_list; \
55871+ while (role) {
55872+
55873+#define FOR_EACH_ROLE_END(role) \
55874+ role = role->prev; \
55875+ }
55876+
55877+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55878+ subj = NULL; \
55879+ iter = 0; \
55880+ while (iter < role->subj_hash_size) { \
55881+ if (subj == NULL) \
55882+ subj = role->subj_hash[iter]; \
55883+ if (subj == NULL) { \
55884+ iter++; \
55885+ continue; \
55886+ }
55887+
55888+#define FOR_EACH_SUBJECT_END(subj,iter) \
55889+ subj = subj->next; \
55890+ if (subj == NULL) \
55891+ iter++; \
55892+ }
55893+
55894+
55895+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55896+ subj = role->hash->first; \
55897+ while (subj != NULL) {
55898+
55899+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55900+ subj = subj->next; \
55901+ }
55902+
55903+#endif
55904+
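The inline helpers at the end of gracl.h above are simple shift/add/XOR hashes reduced by the table size. A standalone re-typing of fhash() so it can run in userspace (plain integer types; the sample inode, device and table size are arbitrary):

#include <stdio.h>

static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
{
	return (unsigned int)(((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	printf("bucket = %u\n", fhash(131072UL, 2049UL, 256));
	return 0;
}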
55905diff -urNp linux-2.6.32.42/include/linux/gralloc.h linux-2.6.32.42/include/linux/gralloc.h
55906--- linux-2.6.32.42/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55907+++ linux-2.6.32.42/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55908@@ -0,0 +1,9 @@
55909+#ifndef __GRALLOC_H
55910+#define __GRALLOC_H
55911+
55912+void acl_free_all(void);
55913+int acl_alloc_stack_init(unsigned long size);
55914+void *acl_alloc(unsigned long len);
55915+void *acl_alloc_num(unsigned long num, unsigned long len);
55916+
55917+#endif
55918diff -urNp linux-2.6.32.42/include/linux/grdefs.h linux-2.6.32.42/include/linux/grdefs.h
55919--- linux-2.6.32.42/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55920+++ linux-2.6.32.42/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55921@@ -0,0 +1,140 @@
55922+#ifndef GRDEFS_H
55923+#define GRDEFS_H
55924+
55925+/* Begin grsecurity status declarations */
55926+
55927+enum {
55928+ GR_READY = 0x01,
55929+ GR_STATUS_INIT = 0x00 // disabled state
55930+};
55931+
55932+/* Begin ACL declarations */
55933+
55934+/* Role flags */
55935+
55936+enum {
55937+ GR_ROLE_USER = 0x0001,
55938+ GR_ROLE_GROUP = 0x0002,
55939+ GR_ROLE_DEFAULT = 0x0004,
55940+ GR_ROLE_SPECIAL = 0x0008,
55941+ GR_ROLE_AUTH = 0x0010,
55942+ GR_ROLE_NOPW = 0x0020,
55943+ GR_ROLE_GOD = 0x0040,
55944+ GR_ROLE_LEARN = 0x0080,
55945+ GR_ROLE_TPE = 0x0100,
55946+ GR_ROLE_DOMAIN = 0x0200,
55947+ GR_ROLE_PAM = 0x0400,
55948+ GR_ROLE_PERSIST = 0x800
55949+};
55950+
55951+/* ACL Subject and Object mode flags */
55952+enum {
55953+ GR_DELETED = 0x80000000
55954+};
55955+
55956+/* ACL Object-only mode flags */
55957+enum {
55958+ GR_READ = 0x00000001,
55959+ GR_APPEND = 0x00000002,
55960+ GR_WRITE = 0x00000004,
55961+ GR_EXEC = 0x00000008,
55962+ GR_FIND = 0x00000010,
55963+ GR_INHERIT = 0x00000020,
55964+ GR_SETID = 0x00000040,
55965+ GR_CREATE = 0x00000080,
55966+ GR_DELETE = 0x00000100,
55967+ GR_LINK = 0x00000200,
55968+ GR_AUDIT_READ = 0x00000400,
55969+ GR_AUDIT_APPEND = 0x00000800,
55970+ GR_AUDIT_WRITE = 0x00001000,
55971+ GR_AUDIT_EXEC = 0x00002000,
55972+ GR_AUDIT_FIND = 0x00004000,
55973+ GR_AUDIT_INHERIT= 0x00008000,
55974+ GR_AUDIT_SETID = 0x00010000,
55975+ GR_AUDIT_CREATE = 0x00020000,
55976+ GR_AUDIT_DELETE = 0x00040000,
55977+ GR_AUDIT_LINK = 0x00080000,
55978+ GR_PTRACERD = 0x00100000,
55979+ GR_NOPTRACE = 0x00200000,
55980+ GR_SUPPRESS = 0x00400000,
55981+ GR_NOLEARN = 0x00800000,
55982+ GR_INIT_TRANSFER= 0x01000000
55983+};
55984+
55985+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55986+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55987+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55988+
55989+/* ACL subject-only mode flags */
55990+enum {
55991+ GR_KILL = 0x00000001,
55992+ GR_VIEW = 0x00000002,
55993+ GR_PROTECTED = 0x00000004,
55994+ GR_LEARN = 0x00000008,
55995+ GR_OVERRIDE = 0x00000010,
55996+ /* just a placeholder, this mode is only used in userspace */
55997+ GR_DUMMY = 0x00000020,
55998+ GR_PROTSHM = 0x00000040,
55999+ GR_KILLPROC = 0x00000080,
56000+ GR_KILLIPPROC = 0x00000100,
56001+ /* just a placeholder, this mode is only used in userspace */
56002+ GR_NOTROJAN = 0x00000200,
56003+ GR_PROTPROCFD = 0x00000400,
56004+ GR_PROCACCT = 0x00000800,
56005+ GR_RELAXPTRACE = 0x00001000,
56006+ GR_NESTED = 0x00002000,
56007+ GR_INHERITLEARN = 0x00004000,
56008+ GR_PROCFIND = 0x00008000,
56009+ GR_POVERRIDE = 0x00010000,
56010+ GR_KERNELAUTH = 0x00020000,
56011+ GR_ATSECURE = 0x00040000,
56012+ GR_SHMEXEC = 0x00080000
56013+};
56014+
56015+enum {
56016+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
56017+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
56018+ GR_PAX_ENABLE_MPROTECT = 0x0004,
56019+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
56020+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
56021+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
56022+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
56023+ GR_PAX_DISABLE_MPROTECT = 0x0400,
56024+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
56025+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
56026+};
56027+
56028+enum {
56029+ GR_ID_USER = 0x01,
56030+ GR_ID_GROUP = 0x02,
56031+};
56032+
56033+enum {
56034+ GR_ID_ALLOW = 0x01,
56035+ GR_ID_DENY = 0x02,
56036+};
56037+
56038+#define GR_CRASH_RES 31
56039+#define GR_UIDTABLE_MAX 500
56040+
56041+/* begin resource learning section */
56042+enum {
56043+ GR_RLIM_CPU_BUMP = 60,
56044+ GR_RLIM_FSIZE_BUMP = 50000,
56045+ GR_RLIM_DATA_BUMP = 10000,
56046+ GR_RLIM_STACK_BUMP = 1000,
56047+ GR_RLIM_CORE_BUMP = 10000,
56048+ GR_RLIM_RSS_BUMP = 500000,
56049+ GR_RLIM_NPROC_BUMP = 1,
56050+ GR_RLIM_NOFILE_BUMP = 5,
56051+ GR_RLIM_MEMLOCK_BUMP = 50000,
56052+ GR_RLIM_AS_BUMP = 500000,
56053+ GR_RLIM_LOCKS_BUMP = 2,
56054+ GR_RLIM_SIGPENDING_BUMP = 5,
56055+ GR_RLIM_MSGQUEUE_BUMP = 10000,
56056+ GR_RLIM_NICE_BUMP = 1,
56057+ GR_RLIM_RTPRIO_BUMP = 1,
56058+ GR_RLIM_RTTIME_BUMP = 1000000
56059+};
56060+
56061+#endif
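The GR_RLIM_*_BUMP values above are granularities used when resource learning reports suggested limits; a plausible use is rounding a requested amount up to the next bump boundary. The exact formula is not shown in this hunk, so the sketch below is an assumption for illustration only:

#include <stdio.h>

#define GR_RLIM_FSIZE_BUMP 50000

/* Assumed rounding rule, not taken from the grsecurity sources. */
static unsigned long bump_up(unsigned long wanted, unsigned long bump)
{
	return wanted + bump - (wanted % bump);
}

int main(void)
{
	printf("%lu rounds up to %lu\n", 123456UL, bump_up(123456UL, GR_RLIM_FSIZE_BUMP));	/* 150000 */
	return 0;
}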
56062diff -urNp linux-2.6.32.42/include/linux/grinternal.h linux-2.6.32.42/include/linux/grinternal.h
56063--- linux-2.6.32.42/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
56064+++ linux-2.6.32.42/include/linux/grinternal.h 2011-06-29 19:41:14.000000000 -0400
56065@@ -0,0 +1,219 @@
56066+#ifndef __GRINTERNAL_H
56067+#define __GRINTERNAL_H
56068+
56069+#ifdef CONFIG_GRKERNSEC
56070+
56071+#include <linux/fs.h>
56072+#include <linux/mnt_namespace.h>
56073+#include <linux/nsproxy.h>
56074+#include <linux/gracl.h>
56075+#include <linux/grdefs.h>
56076+#include <linux/grmsg.h>
56077+
56078+void gr_add_learn_entry(const char *fmt, ...)
56079+ __attribute__ ((format (printf, 1, 2)));
56080+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
56081+ const struct vfsmount *mnt);
56082+__u32 gr_check_create(const struct dentry *new_dentry,
56083+ const struct dentry *parent,
56084+ const struct vfsmount *mnt, const __u32 mode);
56085+int gr_check_protected_task(const struct task_struct *task);
56086+__u32 to_gr_audit(const __u32 reqmode);
56087+int gr_set_acls(const int type);
56088+int gr_apply_subject_to_task(struct task_struct *task);
56089+int gr_acl_is_enabled(void);
56090+char gr_roletype_to_char(void);
56091+
56092+void gr_handle_alertkill(struct task_struct *task);
56093+char *gr_to_filename(const struct dentry *dentry,
56094+ const struct vfsmount *mnt);
56095+char *gr_to_filename1(const struct dentry *dentry,
56096+ const struct vfsmount *mnt);
56097+char *gr_to_filename2(const struct dentry *dentry,
56098+ const struct vfsmount *mnt);
56099+char *gr_to_filename3(const struct dentry *dentry,
56100+ const struct vfsmount *mnt);
56101+
56102+extern int grsec_enable_harden_ptrace;
56103+extern int grsec_enable_link;
56104+extern int grsec_enable_fifo;
56105+extern int grsec_enable_execve;
56106+extern int grsec_enable_shm;
56107+extern int grsec_enable_execlog;
56108+extern int grsec_enable_signal;
56109+extern int grsec_enable_audit_ptrace;
56110+extern int grsec_enable_forkfail;
56111+extern int grsec_enable_time;
56112+extern int grsec_enable_rofs;
56113+extern int grsec_enable_chroot_shmat;
56114+extern int grsec_enable_chroot_findtask;
56115+extern int grsec_enable_chroot_mount;
56116+extern int grsec_enable_chroot_double;
56117+extern int grsec_enable_chroot_pivot;
56118+extern int grsec_enable_chroot_chdir;
56119+extern int grsec_enable_chroot_chmod;
56120+extern int grsec_enable_chroot_mknod;
56121+extern int grsec_enable_chroot_fchdir;
56122+extern int grsec_enable_chroot_nice;
56123+extern int grsec_enable_chroot_execlog;
56124+extern int grsec_enable_chroot_caps;
56125+extern int grsec_enable_chroot_sysctl;
56126+extern int grsec_enable_chroot_unix;
56127+extern int grsec_enable_tpe;
56128+extern int grsec_tpe_gid;
56129+extern int grsec_enable_tpe_all;
56130+extern int grsec_enable_tpe_invert;
56131+extern int grsec_enable_socket_all;
56132+extern int grsec_socket_all_gid;
56133+extern int grsec_enable_socket_client;
56134+extern int grsec_socket_client_gid;
56135+extern int grsec_enable_socket_server;
56136+extern int grsec_socket_server_gid;
56137+extern int grsec_audit_gid;
56138+extern int grsec_enable_group;
56139+extern int grsec_enable_audit_textrel;
56140+extern int grsec_enable_log_rwxmaps;
56141+extern int grsec_enable_mount;
56142+extern int grsec_enable_chdir;
56143+extern int grsec_resource_logging;
56144+extern int grsec_enable_blackhole;
56145+extern int grsec_lastack_retries;
56146+extern int grsec_enable_brute;
56147+extern int grsec_lock;
56148+
56149+extern spinlock_t grsec_alert_lock;
56150+extern unsigned long grsec_alert_wtime;
56151+extern unsigned long grsec_alert_fyet;
56152+
56153+extern spinlock_t grsec_audit_lock;
56154+
56155+extern rwlock_t grsec_exec_file_lock;
56156+
56157+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
56158+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
56159+ (tsk)->exec_file->f_vfsmnt) : "/")
56160+
56161+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
56162+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
56163+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56164+
56165+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
56166+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
56167+ (tsk)->exec_file->f_vfsmnt) : "/")
56168+
56169+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
56170+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
56171+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56172+
56173+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
56174+
56175+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56176+
56177+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56178+ (task)->pid, (cred)->uid, \
56179+ (cred)->euid, (cred)->gid, (cred)->egid, \
56180+ gr_parent_task_fullpath(task), \
56181+ (task)->real_parent->comm, (task)->real_parent->pid, \
56182+ (pcred)->uid, (pcred)->euid, \
56183+ (pcred)->gid, (pcred)->egid
56184+
56185+#define GR_CHROOT_CAPS {{ \
56186+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56187+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56188+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56189+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56190+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56191+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
56192+
56193+#define security_learn(normal_msg,args...) \
56194+({ \
56195+ read_lock(&grsec_exec_file_lock); \
56196+ gr_add_learn_entry(normal_msg "\n", ## args); \
56197+ read_unlock(&grsec_exec_file_lock); \
56198+})
56199+
56200+enum {
56201+ GR_DO_AUDIT,
56202+ GR_DONT_AUDIT,
56203+ GR_DONT_AUDIT_GOOD
56204+};
56205+
56206+enum {
56207+ GR_TTYSNIFF,
56208+ GR_RBAC,
56209+ GR_RBAC_STR,
56210+ GR_STR_RBAC,
56211+ GR_RBAC_MODE2,
56212+ GR_RBAC_MODE3,
56213+ GR_FILENAME,
56214+ GR_SYSCTL_HIDDEN,
56215+ GR_NOARGS,
56216+ GR_ONE_INT,
56217+ GR_ONE_INT_TWO_STR,
56218+ GR_ONE_STR,
56219+ GR_STR_INT,
56220+ GR_TWO_STR_INT,
56221+ GR_TWO_INT,
56222+ GR_TWO_U64,
56223+ GR_THREE_INT,
56224+ GR_FIVE_INT_TWO_STR,
56225+ GR_TWO_STR,
56226+ GR_THREE_STR,
56227+ GR_FOUR_STR,
56228+ GR_STR_FILENAME,
56229+ GR_FILENAME_STR,
56230+ GR_FILENAME_TWO_INT,
56231+ GR_FILENAME_TWO_INT_STR,
56232+ GR_TEXTREL,
56233+ GR_PTRACE,
56234+ GR_RESOURCE,
56235+ GR_CAP,
56236+ GR_SIG,
56237+ GR_SIG2,
56238+ GR_CRASH1,
56239+ GR_CRASH2,
56240+ GR_PSACCT,
56241+ GR_RWXMAP
56242+};
56243+
56244+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
56245+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
56246+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
56247+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
56248+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
56249+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
56250+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
56251+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
56252+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
56253+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
56254+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
56255+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
56256+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
56257+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
56258+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
56259+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
56260+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
56261+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
56262+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56263+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56264+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56265+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56266+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56267+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56268+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56269+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56270+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56271+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56272+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56273+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56274+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56275+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56276+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56277+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56278+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56279+
56280+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56281+
56282+#endif
56283+
56284+#endif
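Each gr_log_* wrapper above expands to a gr_log_varargs() call carrying an audit disposition, a message prefix, an argument-layout selector from the enum, and the matching varargs. A toy userspace dispatcher with the same shape (all demo_ names are invented; this is not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_NOARGS, DEMO_ONE_INT, DEMO_ONE_STR };

static void demo_log_varargs(int audit, const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	printf("[audit=%d] ", audit);
	switch (argtypes) {
	case DEMO_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case DEMO_ONE_STR:
		printf(msg, va_arg(ap, const char *));
		break;
	default:
		fputs(msg, stdout);
		break;
	}
	va_end(ap);
	putchar('\n');
}

#define demo_log_int(audit, msg, num) demo_log_varargs(audit, msg, DEMO_ONE_INT, num)

int main(void)
{
	demo_log_int(1, "denied resource overstep by requesting %d", 42);
	return 0;
}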
56285diff -urNp linux-2.6.32.42/include/linux/grmsg.h linux-2.6.32.42/include/linux/grmsg.h
56286--- linux-2.6.32.42/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56287+++ linux-2.6.32.42/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
56288@@ -0,0 +1,108 @@
56289+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56290+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56291+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56292+#define GR_STOPMOD_MSG "denied modification of module state by "
56293+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56294+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56295+#define GR_IOPERM_MSG "denied use of ioperm() by "
56296+#define GR_IOPL_MSG "denied use of iopl() by "
56297+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56298+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56299+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56300+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56301+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56302+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56303+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56304+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56305+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56306+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56307+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56308+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56309+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56310+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56311+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56312+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56313+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56314+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56315+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56316+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56317+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56318+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56319+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56320+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56321+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56322+#define GR_NPROC_MSG "denied overstep of process limit by "
56323+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56324+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56325+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56326+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56327+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56328+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56329+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56330+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56331+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56332+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56333+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56334+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56335+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56336+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56337+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56338+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56339+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56340+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56341+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56342+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
56343+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56344+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56345+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56346+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56347+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56348+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56349+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56350+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56351+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56352+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56353+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56354+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56355+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56356+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56357+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56358+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56359+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56360+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56361+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56362+#define GR_FAILFORK_MSG "failed fork with errno %s by "
56363+#define GR_NICE_CHROOT_MSG "denied priority change by "
56364+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56365+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56366+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56367+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56368+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56369+#define GR_TIME_MSG "time set by "
56370+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56371+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56372+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56373+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56374+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56375+#define GR_BIND_MSG "denied bind() by "
56376+#define GR_CONNECT_MSG "denied connect() by "
56377+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56378+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56379+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56380+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56381+#define GR_CAP_ACL_MSG "use of %s denied for "
56382+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56383+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56384+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56385+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56386+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56387+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56388+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56389+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56390+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56391+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56392+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56393+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56394+#define GR_VM86_MSG "denied use of vm86 by "
56395+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56396+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
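Several templates above splice in DEFAULTSECMSG, and the logger is expected to supply the task fields in the same order (compare DEFAULTSECARGS in grinternal.h). A trimmed-down userspace illustration with made-up values (the DEMO_ macros are shortened stand-ins, not the real definitions, which also carry the parent-task fields):

#include <stdio.h>

#define DEMO_DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
#define DEMO_SIG_MSG "signal %d sent to " DEMO_DEFAULTSECMSG " by "

int main(void)
{
	printf(DEMO_SIG_MSG "\n",
	       9, "/usr/bin/demo", "demo", 1234, 1000u, 1000u, 100u, 100u);
	return 0;
}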
56397diff -urNp linux-2.6.32.42/include/linux/grsecurity.h linux-2.6.32.42/include/linux/grsecurity.h
56398--- linux-2.6.32.42/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56399+++ linux-2.6.32.42/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
56400@@ -0,0 +1,212 @@
56401+#ifndef GR_SECURITY_H
56402+#define GR_SECURITY_H
56403+#include <linux/fs.h>
56404+#include <linux/fs_struct.h>
56405+#include <linux/binfmts.h>
56406+#include <linux/gracl.h>
56407+#include <linux/compat.h>
56408+
56409+/* notify of brain-dead configs */
56410+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56411+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56412+#endif
56413+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56414+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56415+#endif
56416+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56417+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56418+#endif
56419+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56420+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56421+#endif
56422+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56423+#error "CONFIG_PAX enabled, but no PaX options are enabled."
56424+#endif
56425+
56426+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56427+void gr_handle_brute_check(void);
56428+void gr_handle_kernel_exploit(void);
56429+int gr_process_user_ban(void);
56430+
56431+char gr_roletype_to_char(void);
56432+
56433+int gr_acl_enable_at_secure(void);
56434+
56435+int gr_check_user_change(int real, int effective, int fs);
56436+int gr_check_group_change(int real, int effective, int fs);
56437+
56438+void gr_del_task_from_ip_table(struct task_struct *p);
56439+
56440+int gr_pid_is_chrooted(struct task_struct *p);
56441+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56442+int gr_handle_chroot_nice(void);
56443+int gr_handle_chroot_sysctl(const int op);
56444+int gr_handle_chroot_setpriority(struct task_struct *p,
56445+ const int niceval);
56446+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56447+int gr_handle_chroot_chroot(const struct dentry *dentry,
56448+ const struct vfsmount *mnt);
56449+int gr_handle_chroot_caps(struct path *path);
56450+void gr_handle_chroot_chdir(struct path *path);
56451+int gr_handle_chroot_chmod(const struct dentry *dentry,
56452+ const struct vfsmount *mnt, const int mode);
56453+int gr_handle_chroot_mknod(const struct dentry *dentry,
56454+ const struct vfsmount *mnt, const int mode);
56455+int gr_handle_chroot_mount(const struct dentry *dentry,
56456+ const struct vfsmount *mnt,
56457+ const char *dev_name);
56458+int gr_handle_chroot_pivot(void);
56459+int gr_handle_chroot_unix(const pid_t pid);
56460+
56461+int gr_handle_rawio(const struct inode *inode);
56462+int gr_handle_nproc(void);
56463+
56464+void gr_handle_ioperm(void);
56465+void gr_handle_iopl(void);
56466+
56467+int gr_tpe_allow(const struct file *file);
56468+
56469+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56470+void gr_clear_chroot_entries(struct task_struct *task);
56471+
56472+void gr_log_forkfail(const int retval);
56473+void gr_log_timechange(void);
56474+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56475+void gr_log_chdir(const struct dentry *dentry,
56476+ const struct vfsmount *mnt);
56477+void gr_log_chroot_exec(const struct dentry *dentry,
56478+ const struct vfsmount *mnt);
56479+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
56480+#ifdef CONFIG_COMPAT
56481+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
56482+#endif
56483+void gr_log_remount(const char *devname, const int retval);
56484+void gr_log_unmount(const char *devname, const int retval);
56485+void gr_log_mount(const char *from, const char *to, const int retval);
56486+void gr_log_textrel(struct vm_area_struct *vma);
56487+void gr_log_rwxmmap(struct file *file);
56488+void gr_log_rwxmprotect(struct file *file);
56489+
56490+int gr_handle_follow_link(const struct inode *parent,
56491+ const struct inode *inode,
56492+ const struct dentry *dentry,
56493+ const struct vfsmount *mnt);
56494+int gr_handle_fifo(const struct dentry *dentry,
56495+ const struct vfsmount *mnt,
56496+ const struct dentry *dir, const int flag,
56497+ const int acc_mode);
56498+int gr_handle_hardlink(const struct dentry *dentry,
56499+ const struct vfsmount *mnt,
56500+ struct inode *inode,
56501+ const int mode, const char *to);
56502+
56503+int gr_is_capable(const int cap);
56504+int gr_is_capable_nolog(const int cap);
56505+void gr_learn_resource(const struct task_struct *task, const int limit,
56506+ const unsigned long wanted, const int gt);
56507+void gr_copy_label(struct task_struct *tsk);
56508+void gr_handle_crash(struct task_struct *task, const int sig);
56509+int gr_handle_signal(const struct task_struct *p, const int sig);
56510+int gr_check_crash_uid(const uid_t uid);
56511+int gr_check_protected_task(const struct task_struct *task);
56512+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56513+int gr_acl_handle_mmap(const struct file *file,
56514+ const unsigned long prot);
56515+int gr_acl_handle_mprotect(const struct file *file,
56516+ const unsigned long prot);
56517+int gr_check_hidden_task(const struct task_struct *tsk);
56518+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56519+ const struct vfsmount *mnt);
56520+__u32 gr_acl_handle_utime(const struct dentry *dentry,
56521+ const struct vfsmount *mnt);
56522+__u32 gr_acl_handle_access(const struct dentry *dentry,
56523+ const struct vfsmount *mnt, const int fmode);
56524+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56525+ const struct vfsmount *mnt, mode_t mode);
56526+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56527+ const struct vfsmount *mnt, mode_t mode);
56528+__u32 gr_acl_handle_chown(const struct dentry *dentry,
56529+ const struct vfsmount *mnt);
56530+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56531+ const struct vfsmount *mnt);
56532+int gr_handle_ptrace(struct task_struct *task, const long request);
56533+int gr_handle_proc_ptrace(struct task_struct *task);
56534+__u32 gr_acl_handle_execve(const struct dentry *dentry,
56535+ const struct vfsmount *mnt);
56536+int gr_check_crash_exec(const struct file *filp);
56537+int gr_acl_is_enabled(void);
56538+void gr_set_kernel_label(struct task_struct *task);
56539+void gr_set_role_label(struct task_struct *task, const uid_t uid,
56540+ const gid_t gid);
56541+int gr_set_proc_label(const struct dentry *dentry,
56542+ const struct vfsmount *mnt,
56543+ const int unsafe_share);
56544+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56545+ const struct vfsmount *mnt);
56546+__u32 gr_acl_handle_open(const struct dentry *dentry,
56547+ const struct vfsmount *mnt, const int fmode);
56548+__u32 gr_acl_handle_creat(const struct dentry *dentry,
56549+ const struct dentry *p_dentry,
56550+ const struct vfsmount *p_mnt, const int fmode,
56551+ const int imode);
56552+void gr_handle_create(const struct dentry *dentry,
56553+ const struct vfsmount *mnt);
56554+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56555+ const struct dentry *parent_dentry,
56556+ const struct vfsmount *parent_mnt,
56557+ const int mode);
56558+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56559+ const struct dentry *parent_dentry,
56560+ const struct vfsmount *parent_mnt);
56561+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56562+ const struct vfsmount *mnt);
56563+void gr_handle_delete(const ino_t ino, const dev_t dev);
56564+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56565+ const struct vfsmount *mnt);
56566+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56567+ const struct dentry *parent_dentry,
56568+ const struct vfsmount *parent_mnt,
56569+ const char *from);
56570+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56571+ const struct dentry *parent_dentry,
56572+ const struct vfsmount *parent_mnt,
56573+ const struct dentry *old_dentry,
56574+ const struct vfsmount *old_mnt, const char *to);
56575+int gr_acl_handle_rename(struct dentry *new_dentry,
56576+ struct dentry *parent_dentry,
56577+ const struct vfsmount *parent_mnt,
56578+ struct dentry *old_dentry,
56579+ struct inode *old_parent_inode,
56580+ struct vfsmount *old_mnt, const char *newname);
56581+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56582+ struct dentry *old_dentry,
56583+ struct dentry *new_dentry,
56584+ struct vfsmount *mnt, const __u8 replace);
56585+__u32 gr_check_link(const struct dentry *new_dentry,
56586+ const struct dentry *parent_dentry,
56587+ const struct vfsmount *parent_mnt,
56588+ const struct dentry *old_dentry,
56589+ const struct vfsmount *old_mnt);
56590+int gr_acl_handle_filldir(const struct file *file, const char *name,
56591+ const unsigned int namelen, const ino_t ino);
56592+
56593+__u32 gr_acl_handle_unix(const struct dentry *dentry,
56594+ const struct vfsmount *mnt);
56595+void gr_acl_handle_exit(void);
56596+void gr_acl_handle_psacct(struct task_struct *task, const long code);
56597+int gr_acl_handle_procpidmem(const struct task_struct *task);
56598+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56599+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56600+void gr_audit_ptrace(struct task_struct *task);
56601+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56602+
56603+#ifdef CONFIG_GRKERNSEC
56604+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56605+void gr_handle_vm86(void);
56606+void gr_handle_mem_readwrite(u64 from, u64 to);
56607+
56608+extern int grsec_enable_dmesg;
56609+extern int grsec_disable_privio;
56610+#endif
56611+
56612+#endif
56613diff -urNp linux-2.6.32.42/include/linux/hdpu_features.h linux-2.6.32.42/include/linux/hdpu_features.h
56614--- linux-2.6.32.42/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
56615+++ linux-2.6.32.42/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
56616@@ -3,7 +3,7 @@
56617 struct cpustate_t {
56618 spinlock_t lock;
56619 int excl;
56620- int open_count;
56621+ atomic_t open_count;
56622 unsigned char cached_val;
56623 int inited;
56624 unsigned long *set_addr;
56625diff -urNp linux-2.6.32.42/include/linux/highmem.h linux-2.6.32.42/include/linux/highmem.h
56626--- linux-2.6.32.42/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
56627+++ linux-2.6.32.42/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
56628@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
56629 kunmap_atomic(kaddr, KM_USER0);
56630 }
56631
56632+static inline void sanitize_highpage(struct page *page)
56633+{
56634+ void *kaddr;
56635+ unsigned long flags;
56636+
56637+ local_irq_save(flags);
56638+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
56639+ clear_page(kaddr);
56640+ kunmap_atomic(kaddr, KM_CLEARPAGE);
56641+ local_irq_restore(flags);
56642+}
56643+
56644 static inline void zero_user_segments(struct page *page,
56645 unsigned start1, unsigned end1,
56646 unsigned start2, unsigned end2)
56647diff -urNp linux-2.6.32.42/include/linux/i2o.h linux-2.6.32.42/include/linux/i2o.h
56648--- linux-2.6.32.42/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
56649+++ linux-2.6.32.42/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
56650@@ -564,7 +564,7 @@ struct i2o_controller {
56651 struct i2o_device *exec; /* Executive */
56652 #if BITS_PER_LONG == 64
56653 spinlock_t context_list_lock; /* lock for context_list */
56654- atomic_t context_list_counter; /* needed for unique contexts */
56655+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56656 struct list_head context_list; /* list of context id's
56657 and pointers */
56658 #endif
56659diff -urNp linux-2.6.32.42/include/linux/init_task.h linux-2.6.32.42/include/linux/init_task.h
56660--- linux-2.6.32.42/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
56661+++ linux-2.6.32.42/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
56662@@ -83,6 +83,12 @@ extern struct group_info init_groups;
56663 #define INIT_IDS
56664 #endif
56665
56666+#ifdef CONFIG_X86
56667+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56668+#else
56669+#define INIT_TASK_THREAD_INFO
56670+#endif
56671+
56672 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
56673 /*
56674 * Because of the reduced scope of CAP_SETPCAP when filesystem
56675@@ -156,6 +162,7 @@ extern struct cred init_cred;
56676 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
56677 .comm = "swapper", \
56678 .thread = INIT_THREAD, \
56679+ INIT_TASK_THREAD_INFO \
56680 .fs = &init_fs, \
56681 .files = &init_files, \
56682 .signal = &init_signals, \
56683diff -urNp linux-2.6.32.42/include/linux/interrupt.h linux-2.6.32.42/include/linux/interrupt.h
56684--- linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
56685+++ linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
56686@@ -363,7 +363,7 @@ enum
56687 /* map softirq index to softirq name. update 'softirq_to_name' in
56688 * kernel/softirq.c when adding a new softirq.
56689 */
56690-extern char *softirq_to_name[NR_SOFTIRQS];
56691+extern const char * const softirq_to_name[NR_SOFTIRQS];
56692
56693 /* softirq mask and active fields moved to irq_cpustat_t in
56694 * asm/hardirq.h to get better cache usage. KAO
56695@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56696
56697 struct softirq_action
56698 {
56699- void (*action)(struct softirq_action *);
56700+ void (*action)(void);
56701 };
56702
56703 asmlinkage void do_softirq(void);
56704 asmlinkage void __do_softirq(void);
56705-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56706+extern void open_softirq(int nr, void (*action)(void));
56707 extern void softirq_init(void);
56708 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56709 extern void raise_softirq_irqoff(unsigned int nr);
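After the interrupt.h change above a softirq action no longer receives a struct softirq_action pointer, so handlers are plain void(void) functions and the dispatcher simply indexes a table of them. A userspace sketch of that shape (all demo_ names are invented):

#include <stdio.h>

#define DEMO_NR_SOFTIRQS 2

static void demo_timer_action(void)  { puts("timer softirq"); }
static void demo_net_rx_action(void) { puts("net rx softirq"); }

static void (*demo_softirq_vec[DEMO_NR_SOFTIRQS])(void);

static void demo_open_softirq(int nr, void (*action)(void))
{
	demo_softirq_vec[nr] = action;
}

int main(void)
{
	int nr;

	demo_open_softirq(0, demo_timer_action);
	demo_open_softirq(1, demo_net_rx_action);

	for (nr = 0; nr < DEMO_NR_SOFTIRQS; nr++)
		demo_softirq_vec[nr]();	/* no softirq_action argument any more */
	return 0;
}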
56710diff -urNp linux-2.6.32.42/include/linux/irq.h linux-2.6.32.42/include/linux/irq.h
56711--- linux-2.6.32.42/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56712+++ linux-2.6.32.42/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56713@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56714 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56715 bool boot)
56716 {
56717+#ifdef CONFIG_CPUMASK_OFFSTACK
56718 gfp_t gfp = GFP_ATOMIC;
56719
56720 if (boot)
56721 gfp = GFP_NOWAIT;
56722
56723-#ifdef CONFIG_CPUMASK_OFFSTACK
56724 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56725 return false;
56726
56727diff -urNp linux-2.6.32.42/include/linux/kallsyms.h linux-2.6.32.42/include/linux/kallsyms.h
56728--- linux-2.6.32.42/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56729+++ linux-2.6.32.42/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56730@@ -15,7 +15,8 @@
56731
56732 struct module;
56733
56734-#ifdef CONFIG_KALLSYMS
56735+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56736+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56737 /* Lookup the address for a symbol. Returns 0 if not found. */
56738 unsigned long kallsyms_lookup_name(const char *name);
56739
56740@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56741 /* Stupid that this does nothing, but I didn't create this mess. */
56742 #define __print_symbol(fmt, addr)
56743 #endif /*CONFIG_KALLSYMS*/
56744+#else /* when included by kallsyms.c, vsnprintf.c, or
56745+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56746+extern void __print_symbol(const char *fmt, unsigned long address);
56747+extern int sprint_symbol(char *buffer, unsigned long address);
56748+const char *kallsyms_lookup(unsigned long addr,
56749+ unsigned long *symbolsize,
56750+ unsigned long *offset,
56751+ char **modname, char *namebuf);
56752+#endif
56753
56754 /* This macro allows us to keep printk typechecking */
56755 static void __check_printsym_format(const char *fmt, ...)
56756diff -urNp linux-2.6.32.42/include/linux/kgdb.h linux-2.6.32.42/include/linux/kgdb.h
56757--- linux-2.6.32.42/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56758+++ linux-2.6.32.42/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56759@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56760
56761 extern int kgdb_connected;
56762
56763-extern atomic_t kgdb_setting_breakpoint;
56764-extern atomic_t kgdb_cpu_doing_single_step;
56765+extern atomic_unchecked_t kgdb_setting_breakpoint;
56766+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56767
56768 extern struct task_struct *kgdb_usethread;
56769 extern struct task_struct *kgdb_contthread;
56770@@ -251,20 +251,20 @@ struct kgdb_arch {
56771 */
56772 struct kgdb_io {
56773 const char *name;
56774- int (*read_char) (void);
56775- void (*write_char) (u8);
56776- void (*flush) (void);
56777- int (*init) (void);
56778- void (*pre_exception) (void);
56779- void (*post_exception) (void);
56780+ int (* const read_char) (void);
56781+ void (* const write_char) (u8);
56782+ void (* const flush) (void);
56783+ int (* const init) (void);
56784+ void (* const pre_exception) (void);
56785+ void (* const post_exception) (void);
56786 };
56787
56788-extern struct kgdb_arch arch_kgdb_ops;
56789+extern const struct kgdb_arch arch_kgdb_ops;
56790
56791 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56792
56793-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56794-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56795+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56796+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56797
56798 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56799 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56800diff -urNp linux-2.6.32.42/include/linux/kmod.h linux-2.6.32.42/include/linux/kmod.h
56801--- linux-2.6.32.42/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56802+++ linux-2.6.32.42/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56803@@ -31,6 +31,8 @@
56804 * usually useless though. */
56805 extern int __request_module(bool wait, const char *name, ...) \
56806 __attribute__((format(printf, 2, 3)));
56807+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56808+ __attribute__((format(printf, 3, 4)));
56809 #define request_module(mod...) __request_module(true, mod)
56810 #define request_module_nowait(mod...) __request_module(false, mod)
56811 #define try_then_request_module(x, mod...) \
56812diff -urNp linux-2.6.32.42/include/linux/kobject.h linux-2.6.32.42/include/linux/kobject.h
56813--- linux-2.6.32.42/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56814+++ linux-2.6.32.42/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56815@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56816
56817 struct kobj_type {
56818 void (*release)(struct kobject *kobj);
56819- struct sysfs_ops *sysfs_ops;
56820+ const struct sysfs_ops *sysfs_ops;
56821 struct attribute **default_attrs;
56822 };
56823
56824@@ -118,9 +118,9 @@ struct kobj_uevent_env {
56825 };
56826
56827 struct kset_uevent_ops {
56828- int (*filter)(struct kset *kset, struct kobject *kobj);
56829- const char *(*name)(struct kset *kset, struct kobject *kobj);
56830- int (*uevent)(struct kset *kset, struct kobject *kobj,
56831+ int (* const filter)(struct kset *kset, struct kobject *kobj);
56832+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
56833+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
56834 struct kobj_uevent_env *env);
56835 };
56836
56837@@ -132,7 +132,7 @@ struct kobj_attribute {
56838 const char *buf, size_t count);
56839 };
56840
56841-extern struct sysfs_ops kobj_sysfs_ops;
56842+extern const struct sysfs_ops kobj_sysfs_ops;
56843
56844 /**
56845 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56846@@ -155,14 +155,14 @@ struct kset {
56847 struct list_head list;
56848 spinlock_t list_lock;
56849 struct kobject kobj;
56850- struct kset_uevent_ops *uevent_ops;
56851+ const struct kset_uevent_ops *uevent_ops;
56852 };
56853
56854 extern void kset_init(struct kset *kset);
56855 extern int __must_check kset_register(struct kset *kset);
56856 extern void kset_unregister(struct kset *kset);
56857 extern struct kset * __must_check kset_create_and_add(const char *name,
56858- struct kset_uevent_ops *u,
56859+ const struct kset_uevent_ops *u,
56860 struct kobject *parent_kobj);
56861
56862 static inline struct kset *to_kset(struct kobject *kobj)
56863diff -urNp linux-2.6.32.42/include/linux/kvm_host.h linux-2.6.32.42/include/linux/kvm_host.h
56864--- linux-2.6.32.42/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56865+++ linux-2.6.32.42/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56866@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56867 void vcpu_load(struct kvm_vcpu *vcpu);
56868 void vcpu_put(struct kvm_vcpu *vcpu);
56869
56870-int kvm_init(void *opaque, unsigned int vcpu_size,
56871+int kvm_init(const void *opaque, unsigned int vcpu_size,
56872 struct module *module);
56873 void kvm_exit(void);
56874
56875@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56876 struct kvm_guest_debug *dbg);
56877 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56878
56879-int kvm_arch_init(void *opaque);
56880+int kvm_arch_init(const void *opaque);
56881 void kvm_arch_exit(void);
56882
56883 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56884diff -urNp linux-2.6.32.42/include/linux/libata.h linux-2.6.32.42/include/linux/libata.h
56885--- linux-2.6.32.42/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56886+++ linux-2.6.32.42/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56887@@ -525,11 +525,11 @@ struct ata_ioports {
56888
56889 struct ata_host {
56890 spinlock_t lock;
56891- struct device *dev;
56892+ struct device *dev;
56893 void __iomem * const *iomap;
56894 unsigned int n_ports;
56895 void *private_data;
56896- struct ata_port_operations *ops;
56897+ const struct ata_port_operations *ops;
56898 unsigned long flags;
56899 #ifdef CONFIG_ATA_ACPI
56900 acpi_handle acpi_handle;
56901@@ -710,7 +710,7 @@ struct ata_link {
56902
56903 struct ata_port {
56904 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56905- struct ata_port_operations *ops;
56906+ const struct ata_port_operations *ops;
56907 spinlock_t *lock;
56908 /* Flags owned by the EH context. Only EH should touch these once the
56909 port is active */
56910@@ -892,7 +892,7 @@ struct ata_port_info {
56911 unsigned long pio_mask;
56912 unsigned long mwdma_mask;
56913 unsigned long udma_mask;
56914- struct ata_port_operations *port_ops;
56915+ const struct ata_port_operations *port_ops;
56916 void *private_data;
56917 };
56918
56919@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56920 extern const unsigned long sata_deb_timing_hotplug[];
56921 extern const unsigned long sata_deb_timing_long[];
56922
56923-extern struct ata_port_operations ata_dummy_port_ops;
56924+extern const struct ata_port_operations ata_dummy_port_ops;
56925 extern const struct ata_port_info ata_dummy_port_info;
56926
56927 static inline const unsigned long *
56928@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56929 struct scsi_host_template *sht);
56930 extern void ata_host_detach(struct ata_host *host);
56931 extern void ata_host_init(struct ata_host *, struct device *,
56932- unsigned long, struct ata_port_operations *);
56933+ unsigned long, const struct ata_port_operations *);
56934 extern int ata_scsi_detect(struct scsi_host_template *sht);
56935 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56936 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56937diff -urNp linux-2.6.32.42/include/linux/lockd/bind.h linux-2.6.32.42/include/linux/lockd/bind.h
56938--- linux-2.6.32.42/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56939+++ linux-2.6.32.42/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56940@@ -23,13 +23,13 @@ struct svc_rqst;
56941 * This is the set of functions for lockd->nfsd communication
56942 */
56943 struct nlmsvc_binding {
56944- __be32 (*fopen)(struct svc_rqst *,
56945+ __be32 (* const fopen)(struct svc_rqst *,
56946 struct nfs_fh *,
56947 struct file **);
56948- void (*fclose)(struct file *);
56949+ void (* const fclose)(struct file *);
56950 };
56951
56952-extern struct nlmsvc_binding * nlmsvc_ops;
56953+extern const struct nlmsvc_binding * nlmsvc_ops;
56954
56955 /*
56956 * Similar to nfs_client_initdata, but without the NFS-specific
56957diff -urNp linux-2.6.32.42/include/linux/mm.h linux-2.6.32.42/include/linux/mm.h
56958--- linux-2.6.32.42/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56959+++ linux-2.6.32.42/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56960@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56961
56962 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56963 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56964+
56965+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56966+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56967+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56968+#else
56969 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56970+#endif
56971+
56972 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56973 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56974
56975@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56976 int set_page_dirty_lock(struct page *page);
56977 int clear_page_dirty_for_io(struct page *page);
56978
56979-/* Is the vma a continuation of the stack vma above it? */
56980-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56981-{
56982- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56983-}
56984-
56985 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56986 unsigned long old_addr, struct vm_area_struct *new_vma,
56987 unsigned long new_addr, unsigned long len);
56988@@ -890,6 +891,8 @@ struct shrinker {
56989 extern void register_shrinker(struct shrinker *);
56990 extern void unregister_shrinker(struct shrinker *);
56991
56992+pgprot_t vm_get_page_prot(unsigned long vm_flags);
56993+
56994 int vma_wants_writenotify(struct vm_area_struct *vma);
56995
56996 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56997@@ -1162,6 +1165,7 @@ out:
56998 }
56999
57000 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
57001+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
57002
57003 extern unsigned long do_brk(unsigned long, unsigned long);
57004
57005@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
57006 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
57007 struct vm_area_struct **pprev);
57008
57009+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
57010+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
57011+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
57012+
57013 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
57014 NULL if none. Assume start_addr < end_addr. */
57015 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
57016@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
57017 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
57018 }
57019
57020-pgprot_t vm_get_page_prot(unsigned long vm_flags);
57021 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
57022 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
57023 unsigned long pfn, unsigned long size, pgprot_t);
57024@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
57025 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
57026 extern int sysctl_memory_failure_early_kill;
57027 extern int sysctl_memory_failure_recovery;
57028-extern atomic_long_t mce_bad_pages;
57029+extern atomic_long_unchecked_t mce_bad_pages;
57030+
57031+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57032+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
57033+#else
57034+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
57035+#endif
57036
57037 #endif /* __KERNEL__ */
57038 #endif /* _LINUX_MM_H */
57039diff -urNp linux-2.6.32.42/include/linux/mm_types.h linux-2.6.32.42/include/linux/mm_types.h
57040--- linux-2.6.32.42/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
57041+++ linux-2.6.32.42/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
57042@@ -186,6 +186,8 @@ struct vm_area_struct {
57043 #ifdef CONFIG_NUMA
57044 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
57045 #endif
57046+
57047+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
57048 };
57049
57050 struct core_thread {
57051@@ -287,6 +289,24 @@ struct mm_struct {
57052 #ifdef CONFIG_MMU_NOTIFIER
57053 struct mmu_notifier_mm *mmu_notifier_mm;
57054 #endif
57055+
57056+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57057+ unsigned long pax_flags;
57058+#endif
57059+
57060+#ifdef CONFIG_PAX_DLRESOLVE
57061+ unsigned long call_dl_resolve;
57062+#endif
57063+
57064+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57065+ unsigned long call_syscall;
57066+#endif
57067+
57068+#ifdef CONFIG_PAX_ASLR
57069+ unsigned long delta_mmap; /* randomized offset */
57070+ unsigned long delta_stack; /* randomized offset */
57071+#endif
57072+
57073 };
57074
57075 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
57076diff -urNp linux-2.6.32.42/include/linux/mmu_notifier.h linux-2.6.32.42/include/linux/mmu_notifier.h
57077--- linux-2.6.32.42/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
57078+++ linux-2.6.32.42/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
57079@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
57080 */
57081 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
57082 ({ \
57083- pte_t __pte; \
57084+ pte_t ___pte; \
57085 struct vm_area_struct *___vma = __vma; \
57086 unsigned long ___address = __address; \
57087- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
57088+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
57089 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
57090- __pte; \
57091+ ___pte; \
57092 })
57093
57094 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
57095diff -urNp linux-2.6.32.42/include/linux/mmzone.h linux-2.6.32.42/include/linux/mmzone.h
57096--- linux-2.6.32.42/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
57097+++ linux-2.6.32.42/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
57098@@ -350,7 +350,7 @@ struct zone {
57099 unsigned long flags; /* zone flags, see below */
57100
57101 /* Zone statistics */
57102- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57103+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57104
57105 /*
57106 * prev_priority holds the scanning priority for this zone. It is
57107diff -urNp linux-2.6.32.42/include/linux/mod_devicetable.h linux-2.6.32.42/include/linux/mod_devicetable.h
57108--- linux-2.6.32.42/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
57109+++ linux-2.6.32.42/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
57110@@ -12,7 +12,7 @@
57111 typedef unsigned long kernel_ulong_t;
57112 #endif
57113
57114-#define PCI_ANY_ID (~0)
57115+#define PCI_ANY_ID ((__u16)~0)
57116
57117 struct pci_device_id {
57118 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
57119@@ -131,7 +131,7 @@ struct usb_device_id {
57120 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
57121 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
57122
57123-#define HID_ANY_ID (~0)
57124+#define HID_ANY_ID (~0U)
57125
57126 struct hid_device_id {
57127 __u16 bus;
57128diff -urNp linux-2.6.32.42/include/linux/module.h linux-2.6.32.42/include/linux/module.h
57129--- linux-2.6.32.42/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
57130+++ linux-2.6.32.42/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
57131@@ -287,16 +287,16 @@ struct module
57132 int (*init)(void);
57133
57134 /* If this is non-NULL, vfree after init() returns */
57135- void *module_init;
57136+ void *module_init_rx, *module_init_rw;
57137
57138 /* Here is the actual code + data, vfree'd on unload. */
57139- void *module_core;
57140+ void *module_core_rx, *module_core_rw;
57141
57142 /* Here are the sizes of the init and core sections */
57143- unsigned int init_size, core_size;
57144+ unsigned int init_size_rw, core_size_rw;
57145
57146 /* The size of the executable code in each section. */
57147- unsigned int init_text_size, core_text_size;
57148+ unsigned int init_size_rx, core_size_rx;
57149
57150 /* Arch-specific module values */
57151 struct mod_arch_specific arch;
57152@@ -393,16 +393,46 @@ struct module *__module_address(unsigned
57153 bool is_module_address(unsigned long addr);
57154 bool is_module_text_address(unsigned long addr);
57155
57156+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
57157+{
57158+
57159+#ifdef CONFIG_PAX_KERNEXEC
57160+ if (ktla_ktva(addr) >= (unsigned long)start &&
57161+ ktla_ktva(addr) < (unsigned long)start + size)
57162+ return 1;
57163+#endif
57164+
57165+ return ((void *)addr >= start && (void *)addr < start + size);
57166+}
57167+
57168+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
57169+{
57170+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
57171+}
57172+
57173+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
57174+{
57175+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
57176+}
57177+
57178+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
57179+{
57180+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
57181+}
57182+
57183+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
57184+{
57185+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
57186+}
57187+
57188 static inline int within_module_core(unsigned long addr, struct module *mod)
57189 {
57190- return (unsigned long)mod->module_core <= addr &&
57191- addr < (unsigned long)mod->module_core + mod->core_size;
57192+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
57193 }
57194
57195 static inline int within_module_init(unsigned long addr, struct module *mod)
57196 {
57197- return (unsigned long)mod->module_init <= addr &&
57198- addr < (unsigned long)mod->module_init + mod->init_size;
57199+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
57200 }
57201
57202 /* Search for module by name: must hold module_mutex. */
57203diff -urNp linux-2.6.32.42/include/linux/moduleloader.h linux-2.6.32.42/include/linux/moduleloader.h
57204--- linux-2.6.32.42/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
57205+++ linux-2.6.32.42/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
57206@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
57207 sections. Returns NULL on failure. */
57208 void *module_alloc(unsigned long size);
57209
57210+#ifdef CONFIG_PAX_KERNEXEC
57211+void *module_alloc_exec(unsigned long size);
57212+#else
57213+#define module_alloc_exec(x) module_alloc(x)
57214+#endif
57215+
57216 /* Free memory returned from module_alloc. */
57217 void module_free(struct module *mod, void *module_region);
57218
57219+#ifdef CONFIG_PAX_KERNEXEC
57220+void module_free_exec(struct module *mod, void *module_region);
57221+#else
57222+#define module_free_exec(x, y) module_free((x), (y))
57223+#endif
57224+
57225 /* Apply the given relocation to the (simplified) ELF. Return -error
57226 or 0. */
57227 int apply_relocate(Elf_Shdr *sechdrs,
57228diff -urNp linux-2.6.32.42/include/linux/moduleparam.h linux-2.6.32.42/include/linux/moduleparam.h
57229--- linux-2.6.32.42/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
57230+++ linux-2.6.32.42/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
57231@@ -132,7 +132,7 @@ struct kparam_array
57232
57233 /* Actually copy string: maxlen param is usually sizeof(string). */
57234 #define module_param_string(name, string, len, perm) \
57235- static const struct kparam_string __param_string_##name \
57236+ static const struct kparam_string __param_string_##name __used \
57237 = { len, string }; \
57238 __module_param_call(MODULE_PARAM_PREFIX, name, \
57239 param_set_copystring, param_get_string, \
57240@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
57241
57242 /* Comma-separated array: *nump is set to number they actually specified. */
57243 #define module_param_array_named(name, array, type, nump, perm) \
57244- static const struct kparam_array __param_arr_##name \
57245+ static const struct kparam_array __param_arr_##name __used \
57246 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
57247 sizeof(array[0]), array }; \
57248 __module_param_call(MODULE_PARAM_PREFIX, name, \
57249diff -urNp linux-2.6.32.42/include/linux/mutex.h linux-2.6.32.42/include/linux/mutex.h
57250--- linux-2.6.32.42/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
57251+++ linux-2.6.32.42/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
57252@@ -51,7 +51,7 @@ struct mutex {
57253 spinlock_t wait_lock;
57254 struct list_head wait_list;
57255 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
57256- struct thread_info *owner;
57257+ struct task_struct *owner;
57258 #endif
57259 #ifdef CONFIG_DEBUG_MUTEXES
57260 const char *name;
57261diff -urNp linux-2.6.32.42/include/linux/namei.h linux-2.6.32.42/include/linux/namei.h
57262--- linux-2.6.32.42/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
57263+++ linux-2.6.32.42/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
57264@@ -22,7 +22,7 @@ struct nameidata {
57265 unsigned int flags;
57266 int last_type;
57267 unsigned depth;
57268- char *saved_names[MAX_NESTED_LINKS + 1];
57269+ const char *saved_names[MAX_NESTED_LINKS + 1];
57270
57271 /* Intent data */
57272 union {
57273@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
57274 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57275 extern void unlock_rename(struct dentry *, struct dentry *);
57276
57277-static inline void nd_set_link(struct nameidata *nd, char *path)
57278+static inline void nd_set_link(struct nameidata *nd, const char *path)
57279 {
57280 nd->saved_names[nd->depth] = path;
57281 }
57282
57283-static inline char *nd_get_link(struct nameidata *nd)
57284+static inline const char *nd_get_link(const struct nameidata *nd)
57285 {
57286 return nd->saved_names[nd->depth];
57287 }
57288diff -urNp linux-2.6.32.42/include/linux/netfilter/xt_gradm.h linux-2.6.32.42/include/linux/netfilter/xt_gradm.h
57289--- linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57290+++ linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
57291@@ -0,0 +1,9 @@
57292+#ifndef _LINUX_NETFILTER_XT_GRADM_H
57293+#define _LINUX_NETFILTER_XT_GRADM_H 1
57294+
57295+struct xt_gradm_mtinfo {
57296+ __u16 flags;
57297+ __u16 invflags;
57298+};
57299+
57300+#endif
57301diff -urNp linux-2.6.32.42/include/linux/nodemask.h linux-2.6.32.42/include/linux/nodemask.h
57302--- linux-2.6.32.42/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
57303+++ linux-2.6.32.42/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
57304@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
57305
57306 #define any_online_node(mask) \
57307 ({ \
57308- int node; \
57309- for_each_node_mask(node, (mask)) \
57310- if (node_online(node)) \
57311+ int __node; \
57312+ for_each_node_mask(__node, (mask)) \
57313+ if (node_online(__node)) \
57314 break; \
57315- node; \
57316+ __node; \
57317 })
57318
57319 #define num_online_nodes() num_node_state(N_ONLINE)
57320diff -urNp linux-2.6.32.42/include/linux/oprofile.h linux-2.6.32.42/include/linux/oprofile.h
57321--- linux-2.6.32.42/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
57322+++ linux-2.6.32.42/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
57323@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
57324 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57325 char const * name, ulong * val);
57326
57327-/** Create a file for read-only access to an atomic_t. */
57328+/** Create a file for read-only access to an atomic_unchecked_t. */
57329 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57330- char const * name, atomic_t * val);
57331+ char const * name, atomic_unchecked_t * val);
57332
57333 /** create a directory */
57334 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57335diff -urNp linux-2.6.32.42/include/linux/perf_event.h linux-2.6.32.42/include/linux/perf_event.h
57336--- linux-2.6.32.42/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
57337+++ linux-2.6.32.42/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
57338@@ -476,7 +476,7 @@ struct hw_perf_event {
57339 struct hrtimer hrtimer;
57340 };
57341 };
57342- atomic64_t prev_count;
57343+ atomic64_unchecked_t prev_count;
57344 u64 sample_period;
57345 u64 last_period;
57346 atomic64_t period_left;
57347@@ -557,7 +557,7 @@ struct perf_event {
57348 const struct pmu *pmu;
57349
57350 enum perf_event_active_state state;
57351- atomic64_t count;
57352+ atomic64_unchecked_t count;
57353
57354 /*
57355 * These are the total time in nanoseconds that the event
57356@@ -595,8 +595,8 @@ struct perf_event {
57357 * These accumulate total time (in nanoseconds) that children
57358 * events have been enabled and running, respectively.
57359 */
57360- atomic64_t child_total_time_enabled;
57361- atomic64_t child_total_time_running;
57362+ atomic64_unchecked_t child_total_time_enabled;
57363+ atomic64_unchecked_t child_total_time_running;
57364
57365 /*
57366 * Protect attach/detach and child_list:
57367diff -urNp linux-2.6.32.42/include/linux/pipe_fs_i.h linux-2.6.32.42/include/linux/pipe_fs_i.h
57368--- linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
57369+++ linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
57370@@ -46,9 +46,9 @@ struct pipe_inode_info {
57371 wait_queue_head_t wait;
57372 unsigned int nrbufs, curbuf;
57373 struct page *tmp_page;
57374- unsigned int readers;
57375- unsigned int writers;
57376- unsigned int waiting_writers;
57377+ atomic_t readers;
57378+ atomic_t writers;
57379+ atomic_t waiting_writers;
57380 unsigned int r_counter;
57381 unsigned int w_counter;
57382 struct fasync_struct *fasync_readers;
57383diff -urNp linux-2.6.32.42/include/linux/poison.h linux-2.6.32.42/include/linux/poison.h
57384--- linux-2.6.32.42/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
57385+++ linux-2.6.32.42/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
57386@@ -19,8 +19,8 @@
57387 * under normal circumstances, used to verify that nobody uses
57388 * non-initialized list entries.
57389 */
57390-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57391-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57392+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57393+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57394
57395 /********** include/linux/timer.h **********/
57396 /*
57397diff -urNp linux-2.6.32.42/include/linux/proc_fs.h linux-2.6.32.42/include/linux/proc_fs.h
57398--- linux-2.6.32.42/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
57399+++ linux-2.6.32.42/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
57400@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57401 return proc_create_data(name, mode, parent, proc_fops, NULL);
57402 }
57403
57404+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57405+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57406+{
57407+#ifdef CONFIG_GRKERNSEC_PROC_USER
57408+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57409+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57410+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57411+#else
57412+ return proc_create_data(name, mode, parent, proc_fops, NULL);
57413+#endif
57414+}
57415+
57416+
57417 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57418 mode_t mode, struct proc_dir_entry *base,
57419 read_proc_t *read_proc, void * data)
57420diff -urNp linux-2.6.32.42/include/linux/ptrace.h linux-2.6.32.42/include/linux/ptrace.h
57421--- linux-2.6.32.42/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
57422+++ linux-2.6.32.42/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
57423@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
57424 extern void exit_ptrace(struct task_struct *tracer);
57425 #define PTRACE_MODE_READ 1
57426 #define PTRACE_MODE_ATTACH 2
57427-/* Returns 0 on success, -errno on denial. */
57428-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57429 /* Returns true on success, false on denial. */
57430 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57431+/* Returns true on success, false on denial. */
57432+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57433
57434 static inline int ptrace_reparented(struct task_struct *child)
57435 {
57436diff -urNp linux-2.6.32.42/include/linux/random.h linux-2.6.32.42/include/linux/random.h
57437--- linux-2.6.32.42/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
57438+++ linux-2.6.32.42/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
57439@@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
57440 u32 random32(void);
57441 void srandom32(u32 seed);
57442
57443+static inline unsigned long pax_get_random_long(void)
57444+{
57445+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57446+}
57447+
57448 #endif /* __KERNEL___ */
57449
57450 #endif /* _LINUX_RANDOM_H */
57451diff -urNp linux-2.6.32.42/include/linux/reboot.h linux-2.6.32.42/include/linux/reboot.h
57452--- linux-2.6.32.42/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
57453+++ linux-2.6.32.42/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
57454@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57455 * Architecture-specific implementations of sys_reboot commands.
57456 */
57457
57458-extern void machine_restart(char *cmd);
57459-extern void machine_halt(void);
57460-extern void machine_power_off(void);
57461+extern void machine_restart(char *cmd) __noreturn;
57462+extern void machine_halt(void) __noreturn;
57463+extern void machine_power_off(void) __noreturn;
57464
57465 extern void machine_shutdown(void);
57466 struct pt_regs;
57467@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57468 */
57469
57470 extern void kernel_restart_prepare(char *cmd);
57471-extern void kernel_restart(char *cmd);
57472-extern void kernel_halt(void);
57473-extern void kernel_power_off(void);
57474+extern void kernel_restart(char *cmd) __noreturn;
57475+extern void kernel_halt(void) __noreturn;
57476+extern void kernel_power_off(void) __noreturn;
57477
57478 void ctrl_alt_del(void);
57479
57480@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
57481 * Emergency restart, callable from an interrupt handler.
57482 */
57483
57484-extern void emergency_restart(void);
57485+extern void emergency_restart(void) __noreturn;
57486 #include <asm/emergency-restart.h>
57487
57488 #endif
57489diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs.h linux-2.6.32.42/include/linux/reiserfs_fs.h
57490--- linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
57491+++ linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
57492@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
57493 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57494
57495 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57496-#define get_generation(s) atomic_read (&fs_generation(s))
57497+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57498 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57499 #define __fs_changed(gen,s) (gen != get_generation (s))
57500 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
57501@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
57502 */
57503
57504 struct item_operations {
57505- int (*bytes_number) (struct item_head * ih, int block_size);
57506- void (*decrement_key) (struct cpu_key *);
57507- int (*is_left_mergeable) (struct reiserfs_key * ih,
57508+ int (* const bytes_number) (struct item_head * ih, int block_size);
57509+ void (* const decrement_key) (struct cpu_key *);
57510+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
57511 unsigned long bsize);
57512- void (*print_item) (struct item_head *, char *item);
57513- void (*check_item) (struct item_head *, char *item);
57514+ void (* const print_item) (struct item_head *, char *item);
57515+ void (* const check_item) (struct item_head *, char *item);
57516
57517- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57518+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57519 int is_affected, int insert_size);
57520- int (*check_left) (struct virtual_item * vi, int free,
57521+ int (* const check_left) (struct virtual_item * vi, int free,
57522 int start_skip, int end_skip);
57523- int (*check_right) (struct virtual_item * vi, int free);
57524- int (*part_size) (struct virtual_item * vi, int from, int to);
57525- int (*unit_num) (struct virtual_item * vi);
57526- void (*print_vi) (struct virtual_item * vi);
57527+ int (* const check_right) (struct virtual_item * vi, int free);
57528+ int (* const part_size) (struct virtual_item * vi, int from, int to);
57529+ int (* const unit_num) (struct virtual_item * vi);
57530+ void (* const print_vi) (struct virtual_item * vi);
57531 };
57532
57533-extern struct item_operations *item_ops[TYPE_ANY + 1];
57534+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
57535
57536 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
57537 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
57538diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs_sb.h linux-2.6.32.42/include/linux/reiserfs_fs_sb.h
57539--- linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
57540+++ linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
57541@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
57542 /* Comment? -Hans */
57543 wait_queue_head_t s_wait;
57544 /* To be obsoleted soon by per buffer seals.. -Hans */
57545- atomic_t s_generation_counter; // increased by one every time the
57546+ atomic_unchecked_t s_generation_counter; // increased by one every time the
57547 // tree gets re-balanced
57548 unsigned long s_properties; /* File system properties. Currently holds
57549 on-disk FS format */
57550diff -urNp linux-2.6.32.42/include/linux/sched.h linux-2.6.32.42/include/linux/sched.h
57551--- linux-2.6.32.42/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
57552+++ linux-2.6.32.42/include/linux/sched.h 2011-07-06 19:53:33.000000000 -0400
57553@@ -101,6 +101,7 @@ struct bio;
57554 struct fs_struct;
57555 struct bts_context;
57556 struct perf_event_context;
57557+struct linux_binprm;
57558
57559 /*
57560 * List of flags we want to share for kernel threads,
57561@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
57562 extern signed long schedule_timeout_uninterruptible(signed long timeout);
57563 asmlinkage void __schedule(void);
57564 asmlinkage void schedule(void);
57565-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
57566+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
57567
57568 struct nsproxy;
57569 struct user_namespace;
57570@@ -371,9 +372,12 @@ struct user_namespace;
57571 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57572
57573 extern int sysctl_max_map_count;
57574+extern unsigned long sysctl_heap_stack_gap;
57575
57576 #include <linux/aio.h>
57577
57578+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57579+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57580 extern unsigned long
57581 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57582 unsigned long, unsigned long);
57583@@ -666,6 +670,16 @@ struct signal_struct {
57584 struct tty_audit_buf *tty_audit_buf;
57585 #endif
57586
57587+#ifdef CONFIG_GRKERNSEC
57588+ u32 curr_ip;
57589+ u32 saved_ip;
57590+ u32 gr_saddr;
57591+ u32 gr_daddr;
57592+ u16 gr_sport;
57593+ u16 gr_dport;
57594+ u8 used_accept:1;
57595+#endif
57596+
57597 int oom_adj; /* OOM kill score adjustment (bit shift) */
57598 };
57599
57600@@ -723,6 +737,11 @@ struct user_struct {
57601 struct key *session_keyring; /* UID's default session keyring */
57602 #endif
57603
57604+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57605+ unsigned int banned;
57606+ unsigned long ban_expires;
57607+#endif
57608+
57609 /* Hash table maintenance information */
57610 struct hlist_node uidhash_node;
57611 uid_t uid;
57612@@ -1328,8 +1347,8 @@ struct task_struct {
57613 struct list_head thread_group;
57614
57615 struct completion *vfork_done; /* for vfork() */
57616- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57617- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57618+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57619+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57620
57621 cputime_t utime, stime, utimescaled, stimescaled;
57622 cputime_t gtime;
57623@@ -1343,16 +1362,6 @@ struct task_struct {
57624 struct task_cputime cputime_expires;
57625 struct list_head cpu_timers[3];
57626
57627-/* process credentials */
57628- const struct cred *real_cred; /* objective and real subjective task
57629- * credentials (COW) */
57630- const struct cred *cred; /* effective (overridable) subjective task
57631- * credentials (COW) */
57632- struct mutex cred_guard_mutex; /* guard against foreign influences on
57633- * credential calculations
57634- * (notably. ptrace) */
57635- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57636-
57637 char comm[TASK_COMM_LEN]; /* executable name excluding path
57638 - access with [gs]et_task_comm (which lock
57639 it with task_lock())
57640@@ -1369,6 +1378,10 @@ struct task_struct {
57641 #endif
57642 /* CPU-specific state of this task */
57643 struct thread_struct thread;
57644+/* thread_info moved to task_struct */
57645+#ifdef CONFIG_X86
57646+ struct thread_info tinfo;
57647+#endif
57648 /* filesystem information */
57649 struct fs_struct *fs;
57650 /* open file information */
57651@@ -1436,6 +1449,15 @@ struct task_struct {
57652 int hardirq_context;
57653 int softirq_context;
57654 #endif
57655+
57656+/* process credentials */
57657+ const struct cred *real_cred; /* objective and real subjective task
57658+ * credentials (COW) */
57659+ struct mutex cred_guard_mutex; /* guard against foreign influences on
57660+ * credential calculations
57661+ * (notably. ptrace) */
57662+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57663+
57664 #ifdef CONFIG_LOCKDEP
57665 # define MAX_LOCK_DEPTH 48UL
57666 u64 curr_chain_key;
57667@@ -1456,6 +1478,9 @@ struct task_struct {
57668
57669 struct backing_dev_info *backing_dev_info;
57670
57671+ const struct cred *cred; /* effective (overridable) subjective task
57672+ * credentials (COW) */
57673+
57674 struct io_context *io_context;
57675
57676 unsigned long ptrace_message;
57677@@ -1519,6 +1544,21 @@ struct task_struct {
57678 unsigned long default_timer_slack_ns;
57679
57680 struct list_head *scm_work_list;
57681+
57682+#ifdef CONFIG_GRKERNSEC
57683+ /* grsecurity */
57684+ struct dentry *gr_chroot_dentry;
57685+ struct acl_subject_label *acl;
57686+ struct acl_role_label *role;
57687+ struct file *exec_file;
57688+ u16 acl_role_id;
57689+ /* is this the task that authenticated to the special role */
57690+ u8 acl_sp_role;
57691+ u8 is_writable;
57692+ u8 brute;
57693+ u8 gr_is_chrooted;
57694+#endif
57695+
57696 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57697 /* Index of current stored adress in ret_stack */
57698 int curr_ret_stack;
57699@@ -1542,6 +1582,57 @@ struct task_struct {
57700 #endif /* CONFIG_TRACING */
57701 };
57702
57703+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57704+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57705+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57706+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57707+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57708+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57709+
57710+#ifdef CONFIG_PAX_SOFTMODE
57711+extern unsigned int pax_softmode;
57712+#endif
57713+
57714+extern int pax_check_flags(unsigned long *);
57715+
57716+/* if tsk != current then task_lock must be held on it */
57717+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57718+static inline unsigned long pax_get_flags(struct task_struct *tsk)
57719+{
57720+ if (likely(tsk->mm))
57721+ return tsk->mm->pax_flags;
57722+ else
57723+ return 0UL;
57724+}
57725+
57726+/* if tsk != current then task_lock must be held on it */
57727+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57728+{
57729+ if (likely(tsk->mm)) {
57730+ tsk->mm->pax_flags = flags;
57731+ return 0;
57732+ }
57733+ return -EINVAL;
57734+}
57735+#endif
57736+
57737+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57738+extern void pax_set_initial_flags(struct linux_binprm *bprm);
57739+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57740+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57741+#endif
57742+
57743+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57744+extern void pax_report_insns(void *pc, void *sp);
57745+extern void pax_report_refcount_overflow(struct pt_regs *regs);
57746+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
57747+
57748+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57749+extern void pax_track_stack(void);
57750+#else
57751+static inline void pax_track_stack(void) {}
57752+#endif
57753+
57754 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57755 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57756
57757@@ -1978,7 +2069,9 @@ void yield(void);
57758 extern struct exec_domain default_exec_domain;
57759
57760 union thread_union {
57761+#ifndef CONFIG_X86
57762 struct thread_info thread_info;
57763+#endif
57764 unsigned long stack[THREAD_SIZE/sizeof(long)];
57765 };
57766
57767@@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
57768 extern void exit_itimers(struct signal_struct *);
57769 extern void flush_itimer_signals(void);
57770
57771-extern NORET_TYPE void do_group_exit(int);
57772+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57773
57774 extern void daemonize(const char *, ...);
57775 extern int allow_signal(int);
57776@@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
57777
57778 #endif
57779
57780-static inline int object_is_on_stack(void *obj)
57781+static inline int object_starts_on_stack(void *obj)
57782 {
57783- void *stack = task_stack_page(current);
57784+ const void *stack = task_stack_page(current);
57785
57786 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57787 }
57788
57789+#ifdef CONFIG_PAX_USERCOPY
57790+extern int object_is_on_stack(const void *obj, unsigned long len);
57791+#endif
57792+
57793 extern void thread_info_cache_init(void);
57794
57795 #ifdef CONFIG_DEBUG_STACK_USAGE
57796diff -urNp linux-2.6.32.42/include/linux/screen_info.h linux-2.6.32.42/include/linux/screen_info.h
57797--- linux-2.6.32.42/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57798+++ linux-2.6.32.42/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57799@@ -42,7 +42,8 @@ struct screen_info {
57800 __u16 pages; /* 0x32 */
57801 __u16 vesa_attributes; /* 0x34 */
57802 __u32 capabilities; /* 0x36 */
57803- __u8 _reserved[6]; /* 0x3a */
57804+ __u16 vesapm_size; /* 0x3a */
57805+ __u8 _reserved[4]; /* 0x3c */
57806 } __attribute__((packed));
57807
57808 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57809diff -urNp linux-2.6.32.42/include/linux/security.h linux-2.6.32.42/include/linux/security.h
57810--- linux-2.6.32.42/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57811+++ linux-2.6.32.42/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57812@@ -34,6 +34,7 @@
57813 #include <linux/key.h>
57814 #include <linux/xfrm.h>
57815 #include <linux/gfp.h>
57816+#include <linux/grsecurity.h>
57817 #include <net/flow.h>
57818
57819 /* Maximum number of letters for an LSM name string */
57820diff -urNp linux-2.6.32.42/include/linux/shm.h linux-2.6.32.42/include/linux/shm.h
57821--- linux-2.6.32.42/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57822+++ linux-2.6.32.42/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57823@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57824 pid_t shm_cprid;
57825 pid_t shm_lprid;
57826 struct user_struct *mlock_user;
57827+#ifdef CONFIG_GRKERNSEC
57828+ time_t shm_createtime;
57829+ pid_t shm_lapid;
57830+#endif
57831 };
57832
57833 /* shm_mode upper byte flags */
57834diff -urNp linux-2.6.32.42/include/linux/skbuff.h linux-2.6.32.42/include/linux/skbuff.h
57835--- linux-2.6.32.42/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57836+++ linux-2.6.32.42/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
57837@@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57838 */
57839 static inline int skb_queue_empty(const struct sk_buff_head *list)
57840 {
57841- return list->next == (struct sk_buff *)list;
57842+ return list->next == (const struct sk_buff *)list;
57843 }
57844
57845 /**
57846@@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57847 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57848 const struct sk_buff *skb)
57849 {
57850- return (skb->next == (struct sk_buff *) list);
57851+ return (skb->next == (const struct sk_buff *) list);
57852 }
57853
57854 /**
57855@@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57856 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57857 const struct sk_buff *skb)
57858 {
57859- return (skb->prev == (struct sk_buff *) list);
57860+ return (skb->prev == (const struct sk_buff *) list);
57861 }
57862
57863 /**
57864@@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57865 * headroom, you should not reduce this.
57866 */
57867 #ifndef NET_SKB_PAD
57868-#define NET_SKB_PAD 32
57869+#define NET_SKB_PAD (_AC(32,UL))
57870 #endif
57871
57872 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57873diff -urNp linux-2.6.32.42/include/linux/slab_def.h linux-2.6.32.42/include/linux/slab_def.h
57874--- linux-2.6.32.42/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57875+++ linux-2.6.32.42/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57876@@ -69,10 +69,10 @@ struct kmem_cache {
57877 unsigned long node_allocs;
57878 unsigned long node_frees;
57879 unsigned long node_overflow;
57880- atomic_t allochit;
57881- atomic_t allocmiss;
57882- atomic_t freehit;
57883- atomic_t freemiss;
57884+ atomic_unchecked_t allochit;
57885+ atomic_unchecked_t allocmiss;
57886+ atomic_unchecked_t freehit;
57887+ atomic_unchecked_t freemiss;
57888
57889 /*
57890 * If debugging is enabled, then the allocator can add additional
57891diff -urNp linux-2.6.32.42/include/linux/slab.h linux-2.6.32.42/include/linux/slab.h
57892--- linux-2.6.32.42/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57893+++ linux-2.6.32.42/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57894@@ -11,12 +11,20 @@
57895
57896 #include <linux/gfp.h>
57897 #include <linux/types.h>
57898+#include <linux/err.h>
57899
57900 /*
57901 * Flags to pass to kmem_cache_create().
57902 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57903 */
57904 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57905+
57906+#ifdef CONFIG_PAX_USERCOPY
57907+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57908+#else
57909+#define SLAB_USERCOPY 0x00000000UL
57910+#endif
57911+
57912 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57913 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57914 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57915@@ -82,10 +90,13 @@
57916 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57917 * Both make kfree a no-op.
57918 */
57919-#define ZERO_SIZE_PTR ((void *)16)
57920+#define ZERO_SIZE_PTR \
57921+({ \
57922+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57923+ (void *)(-MAX_ERRNO-1L); \
57924+})
57925
57926-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57927- (unsigned long)ZERO_SIZE_PTR)
57928+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57929
57930 /*
57931 * struct kmem_cache related prototypes
57932@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57933 void kfree(const void *);
57934 void kzfree(const void *);
57935 size_t ksize(const void *);
57936+void check_object_size(const void *ptr, unsigned long n, bool to);
57937
57938 /*
57939 * Allocator specific definitions. These are mainly used to establish optimized
57940@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57941
57942 void __init kmem_cache_init_late(void);
57943
57944+#define kmalloc(x, y) \
57945+({ \
57946+ void *___retval; \
57947+ intoverflow_t ___x = (intoverflow_t)x; \
57948+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57949+ ___retval = NULL; \
57950+ else \
57951+ ___retval = kmalloc((size_t)___x, (y)); \
57952+ ___retval; \
57953+})
57954+
57955+#define kmalloc_node(x, y, z) \
57956+({ \
57957+ void *___retval; \
57958+ intoverflow_t ___x = (intoverflow_t)x; \
57959+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57960+ ___retval = NULL; \
57961+ else \
57962+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
57963+ ___retval; \
57964+})
57965+
57966+#define kzalloc(x, y) \
57967+({ \
57968+ void *___retval; \
57969+ intoverflow_t ___x = (intoverflow_t)x; \
57970+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57971+ ___retval = NULL; \
57972+ else \
57973+ ___retval = kzalloc((size_t)___x, (y)); \
57974+ ___retval; \
57975+})
57976+
57977 #endif /* _LINUX_SLAB_H */
57978diff -urNp linux-2.6.32.42/include/linux/slub_def.h linux-2.6.32.42/include/linux/slub_def.h
57979--- linux-2.6.32.42/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57980+++ linux-2.6.32.42/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57981@@ -86,7 +86,7 @@ struct kmem_cache {
57982 struct kmem_cache_order_objects max;
57983 struct kmem_cache_order_objects min;
57984 gfp_t allocflags; /* gfp flags to use on each alloc */
57985- int refcount; /* Refcount for slab cache destroy */
57986+ atomic_t refcount; /* Refcount for slab cache destroy */
57987 void (*ctor)(void *);
57988 int inuse; /* Offset to metadata */
57989 int align; /* Alignment */
57990diff -urNp linux-2.6.32.42/include/linux/sonet.h linux-2.6.32.42/include/linux/sonet.h
57991--- linux-2.6.32.42/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57992+++ linux-2.6.32.42/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57993@@ -61,7 +61,7 @@ struct sonet_stats {
57994 #include <asm/atomic.h>
57995
57996 struct k_sonet_stats {
57997-#define __HANDLE_ITEM(i) atomic_t i
57998+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57999 __SONET_ITEMS
58000 #undef __HANDLE_ITEM
58001 };
58002diff -urNp linux-2.6.32.42/include/linux/sunrpc/clnt.h linux-2.6.32.42/include/linux/sunrpc/clnt.h
58003--- linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
58004+++ linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
58005@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
58006 {
58007 switch (sap->sa_family) {
58008 case AF_INET:
58009- return ntohs(((struct sockaddr_in *)sap)->sin_port);
58010+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
58011 case AF_INET6:
58012- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
58013+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
58014 }
58015 return 0;
58016 }
58017@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
58018 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
58019 const struct sockaddr *src)
58020 {
58021- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
58022+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
58023 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
58024
58025 dsin->sin_family = ssin->sin_family;
58026@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
58027 if (sa->sa_family != AF_INET6)
58028 return 0;
58029
58030- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
58031+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
58032 }
58033
58034 #endif /* __KERNEL__ */
58035diff -urNp linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h
58036--- linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
58037+++ linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
58038@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
58039 extern unsigned int svcrdma_max_requests;
58040 extern unsigned int svcrdma_max_req_size;
58041
58042-extern atomic_t rdma_stat_recv;
58043-extern atomic_t rdma_stat_read;
58044-extern atomic_t rdma_stat_write;
58045-extern atomic_t rdma_stat_sq_starve;
58046-extern atomic_t rdma_stat_rq_starve;
58047-extern atomic_t rdma_stat_rq_poll;
58048-extern atomic_t rdma_stat_rq_prod;
58049-extern atomic_t rdma_stat_sq_poll;
58050-extern atomic_t rdma_stat_sq_prod;
58051+extern atomic_unchecked_t rdma_stat_recv;
58052+extern atomic_unchecked_t rdma_stat_read;
58053+extern atomic_unchecked_t rdma_stat_write;
58054+extern atomic_unchecked_t rdma_stat_sq_starve;
58055+extern atomic_unchecked_t rdma_stat_rq_starve;
58056+extern atomic_unchecked_t rdma_stat_rq_poll;
58057+extern atomic_unchecked_t rdma_stat_rq_prod;
58058+extern atomic_unchecked_t rdma_stat_sq_poll;
58059+extern atomic_unchecked_t rdma_stat_sq_prod;
58060
58061 #define RPCRDMA_VERSION 1
58062
58063diff -urNp linux-2.6.32.42/include/linux/suspend.h linux-2.6.32.42/include/linux/suspend.h
58064--- linux-2.6.32.42/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
58065+++ linux-2.6.32.42/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
58066@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
58067 * which require special recovery actions in that situation.
58068 */
58069 struct platform_suspend_ops {
58070- int (*valid)(suspend_state_t state);
58071- int (*begin)(suspend_state_t state);
58072- int (*prepare)(void);
58073- int (*prepare_late)(void);
58074- int (*enter)(suspend_state_t state);
58075- void (*wake)(void);
58076- void (*finish)(void);
58077- void (*end)(void);
58078- void (*recover)(void);
58079+ int (* const valid)(suspend_state_t state);
58080+ int (* const begin)(suspend_state_t state);
58081+ int (* const prepare)(void);
58082+ int (* const prepare_late)(void);
58083+ int (* const enter)(suspend_state_t state);
58084+ void (* const wake)(void);
58085+ void (* const finish)(void);
58086+ void (* const end)(void);
58087+ void (* const recover)(void);
58088 };
58089
58090 #ifdef CONFIG_SUSPEND
58091@@ -120,7 +120,7 @@ struct platform_suspend_ops {
58092 * suspend_set_ops - set platform dependent suspend operations
58093 * @ops: The new suspend operations to set.
58094 */
58095-extern void suspend_set_ops(struct platform_suspend_ops *ops);
58096+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
58097 extern int suspend_valid_only_mem(suspend_state_t state);
58098
58099 /**
58100@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
58101 #else /* !CONFIG_SUSPEND */
58102 #define suspend_valid_only_mem NULL
58103
58104-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
58105+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
58106 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
58107 #endif /* !CONFIG_SUSPEND */
58108
58109@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
58110 * platforms which require special recovery actions in that situation.
58111 */
58112 struct platform_hibernation_ops {
58113- int (*begin)(void);
58114- void (*end)(void);
58115- int (*pre_snapshot)(void);
58116- void (*finish)(void);
58117- int (*prepare)(void);
58118- int (*enter)(void);
58119- void (*leave)(void);
58120- int (*pre_restore)(void);
58121- void (*restore_cleanup)(void);
58122- void (*recover)(void);
58123+ int (* const begin)(void);
58124+ void (* const end)(void);
58125+ int (* const pre_snapshot)(void);
58126+ void (* const finish)(void);
58127+ int (* const prepare)(void);
58128+ int (* const enter)(void);
58129+ void (* const leave)(void);
58130+ int (* const pre_restore)(void);
58131+ void (* const restore_cleanup)(void);
58132+ void (* const recover)(void);
58133 };
58134
58135 #ifdef CONFIG_HIBERNATION
58136@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
58137 extern void swsusp_unset_page_free(struct page *);
58138 extern unsigned long get_safe_page(gfp_t gfp_mask);
58139
58140-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
58141+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
58142 extern int hibernate(void);
58143 extern bool system_entering_hibernation(void);
58144 #else /* CONFIG_HIBERNATION */
58145@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
58146 static inline void swsusp_set_page_free(struct page *p) {}
58147 static inline void swsusp_unset_page_free(struct page *p) {}
58148
58149-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
58150+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
58151 static inline int hibernate(void) { return -ENOSYS; }
58152 static inline bool system_entering_hibernation(void) { return false; }
58153 #endif /* CONFIG_HIBERNATION */
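The platform_suspend_ops and platform_hibernation_ops hunks above turn every callback member into a "* const" pointer and constify the setter arguments, so an ops table can be defined const and kept in read-only memory; preventing the callbacks from being retargeted at runtime is the usual motivation for this kind of constification. A small userspace illustration of the idea, with hypothetical names:

#include <stdio.h>

/* Hypothetical ops table in the style of platform_suspend_ops above. */
struct demo_ops {
        int  (* const prepare)(void);
        void (* const finish)(void);
};

static int  demo_prepare(void) { puts("prepare"); return 0; }
static void demo_finish(void)  { puts("finish"); }

/* With const members and a const definition, the table can live in .rodata. */
static const struct demo_ops ops = {
        .prepare = demo_prepare,
        .finish  = demo_finish,
};

int main(void)
{
        if (ops.prepare() == 0)
                ops.finish();
        /* ops.prepare = NULL;  -- rejected at compile time: member is const */
        return 0;
}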
58154diff -urNp linux-2.6.32.42/include/linux/sysctl.h linux-2.6.32.42/include/linux/sysctl.h
58155--- linux-2.6.32.42/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
58156+++ linux-2.6.32.42/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
58157@@ -164,7 +164,11 @@ enum
58158 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
58159 };
58160
58161-
58162+#ifdef CONFIG_PAX_SOFTMODE
58163+enum {
58164+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
58165+};
58166+#endif
58167
58168 /* CTL_VM names: */
58169 enum
58170@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
58171
58172 extern int proc_dostring(struct ctl_table *, int,
58173 void __user *, size_t *, loff_t *);
58174+extern int proc_dostring_modpriv(struct ctl_table *, int,
58175+ void __user *, size_t *, loff_t *);
58176 extern int proc_dointvec(struct ctl_table *, int,
58177 void __user *, size_t *, loff_t *);
58178 extern int proc_dointvec_minmax(struct ctl_table *, int,
58179@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
58180
58181 extern ctl_handler sysctl_data;
58182 extern ctl_handler sysctl_string;
58183+extern ctl_handler sysctl_string_modpriv;
58184 extern ctl_handler sysctl_intvec;
58185 extern ctl_handler sysctl_jiffies;
58186 extern ctl_handler sysctl_ms_jiffies;
58187diff -urNp linux-2.6.32.42/include/linux/sysfs.h linux-2.6.32.42/include/linux/sysfs.h
58188--- linux-2.6.32.42/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
58189+++ linux-2.6.32.42/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
58190@@ -75,8 +75,8 @@ struct bin_attribute {
58191 };
58192
58193 struct sysfs_ops {
58194- ssize_t (*show)(struct kobject *, struct attribute *,char *);
58195- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
58196+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
58197+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
58198 };
58199
58200 struct sysfs_dirent;
58201diff -urNp linux-2.6.32.42/include/linux/thread_info.h linux-2.6.32.42/include/linux/thread_info.h
58202--- linux-2.6.32.42/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
58203+++ linux-2.6.32.42/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
58204@@ -23,7 +23,7 @@ struct restart_block {
58205 };
58206 /* For futex_wait and futex_wait_requeue_pi */
58207 struct {
58208- u32 *uaddr;
58209+ u32 __user *uaddr;
58210 u32 val;
58211 u32 flags;
58212 u32 bitset;
58213diff -urNp linux-2.6.32.42/include/linux/tty.h linux-2.6.32.42/include/linux/tty.h
58214--- linux-2.6.32.42/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
58215+++ linux-2.6.32.42/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
58216@@ -13,6 +13,7 @@
58217 #include <linux/tty_driver.h>
58218 #include <linux/tty_ldisc.h>
58219 #include <linux/mutex.h>
58220+#include <linux/poll.h>
58221
58222 #include <asm/system.h>
58223
58224@@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
58225 extern dev_t tty_devnum(struct tty_struct *tty);
58226 extern void proc_clear_tty(struct task_struct *p);
58227 extern struct tty_struct *get_current_tty(void);
58228-extern void tty_default_fops(struct file_operations *fops);
58229 extern struct tty_struct *alloc_tty_struct(void);
58230 extern void free_tty_struct(struct tty_struct *tty);
58231 extern void initialize_tty_struct(struct tty_struct *tty,
58232@@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
58233 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
58234 extern void tty_ldisc_enable(struct tty_struct *tty);
58235
58236+/* tty_io.c */
58237+extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
58238+extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
58239+extern unsigned int tty_poll(struct file *, poll_table *);
58240+#ifdef CONFIG_COMPAT
58241+extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
58242+ unsigned long arg);
58243+#else
58244+#define tty_compat_ioctl NULL
58245+#endif
58246+extern int tty_release(struct inode *, struct file *);
58247+extern int tty_fasync(int fd, struct file *filp, int on);
58248
58249 /* n_tty.c */
58250 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
58251diff -urNp linux-2.6.32.42/include/linux/tty_ldisc.h linux-2.6.32.42/include/linux/tty_ldisc.h
58252--- linux-2.6.32.42/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
58253+++ linux-2.6.32.42/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
58254@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
58255
58256 struct module *owner;
58257
58258- int refcount;
58259+ atomic_t refcount;
58260 };
58261
58262 struct tty_ldisc {
58263diff -urNp linux-2.6.32.42/include/linux/types.h linux-2.6.32.42/include/linux/types.h
58264--- linux-2.6.32.42/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
58265+++ linux-2.6.32.42/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
58266@@ -191,10 +191,26 @@ typedef struct {
58267 volatile int counter;
58268 } atomic_t;
58269
58270+#ifdef CONFIG_PAX_REFCOUNT
58271+typedef struct {
58272+ volatile int counter;
58273+} atomic_unchecked_t;
58274+#else
58275+typedef atomic_t atomic_unchecked_t;
58276+#endif
58277+
58278 #ifdef CONFIG_64BIT
58279 typedef struct {
58280 volatile long counter;
58281 } atomic64_t;
58282+
58283+#ifdef CONFIG_PAX_REFCOUNT
58284+typedef struct {
58285+ volatile long counter;
58286+} atomic64_unchecked_t;
58287+#else
58288+typedef atomic64_t atomic64_unchecked_t;
58289+#endif
58290 #endif
58291
58292 struct ustat {
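The atomic_unchecked_t type introduced above exists because PAX_REFCOUNT makes ordinary atomic_t arithmetic overflow-checked; counters that are allowed to wrap (statistics and generation numbers such as the vm_stat, rdma_stat and rt_genid conversions elsewhere in this patch) therefore get the unchecked variant. A simplified userspace sketch of the split, using stand-in types rather than the real kernel primitives:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;            /* overflow-checked in-kernel */
typedef struct { volatile int counter; } atomic_unchecked_t;  /* never checked              */

static void atomic_inc(atomic_t *v)
{
        /* Stand-in for the PAX_REFCOUNT behaviour: refuse to wrap a refcount. */
        if (v->counter == INT_MAX) {
                fprintf(stderr, "refcount overflow detected\n");
                return;
        }
        v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        v->counter++;   /* statistics counters may wrap by design */
}

int main(void)
{
        atomic_t ref             = { INT_MAX };  /* saturated refcount      */
        atomic_unchecked_t drops = { 0 };        /* per-CPU style statistic */

        atomic_inc(&ref);              /* caught instead of wrapping */
        atomic_inc_unchecked(&drops);
        printf("ref=%d drops=%d\n", ref.counter, drops.counter);
        return 0;
}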
58293diff -urNp linux-2.6.32.42/include/linux/uaccess.h linux-2.6.32.42/include/linux/uaccess.h
58294--- linux-2.6.32.42/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
58295+++ linux-2.6.32.42/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
58296@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
58297 long ret; \
58298 mm_segment_t old_fs = get_fs(); \
58299 \
58300- set_fs(KERNEL_DS); \
58301 pagefault_disable(); \
58302+ set_fs(KERNEL_DS); \
58303 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58304- pagefault_enable(); \
58305 set_fs(old_fs); \
58306+ pagefault_enable(); \
58307 ret; \
58308 })
58309
58310@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
58311 * Safely read from address @src to the buffer at @dst. If a kernel fault
58312 * happens, handle that and return -EFAULT.
58313 */
58314-extern long probe_kernel_read(void *dst, void *src, size_t size);
58315+extern long probe_kernel_read(void *dst, const void *src, size_t size);
58316
58317 /*
58318 * probe_kernel_write(): safely attempt to write to a location
58319@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
58320 * Safely write to address @dst from the buffer at @src. If a kernel fault
58321 * happens, handle that and return -EFAULT.
58322 */
58323-extern long probe_kernel_write(void *dst, void *src, size_t size);
58324+extern long probe_kernel_write(void *dst, const void *src, size_t size);
58325
58326 #endif /* __LINUX_UACCESS_H__ */
58327diff -urNp linux-2.6.32.42/include/linux/unaligned/access_ok.h linux-2.6.32.42/include/linux/unaligned/access_ok.h
58328--- linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
58329+++ linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
58330@@ -6,32 +6,32 @@
58331
58332 static inline u16 get_unaligned_le16(const void *p)
58333 {
58334- return le16_to_cpup((__le16 *)p);
58335+ return le16_to_cpup((const __le16 *)p);
58336 }
58337
58338 static inline u32 get_unaligned_le32(const void *p)
58339 {
58340- return le32_to_cpup((__le32 *)p);
58341+ return le32_to_cpup((const __le32 *)p);
58342 }
58343
58344 static inline u64 get_unaligned_le64(const void *p)
58345 {
58346- return le64_to_cpup((__le64 *)p);
58347+ return le64_to_cpup((const __le64 *)p);
58348 }
58349
58350 static inline u16 get_unaligned_be16(const void *p)
58351 {
58352- return be16_to_cpup((__be16 *)p);
58353+ return be16_to_cpup((const __be16 *)p);
58354 }
58355
58356 static inline u32 get_unaligned_be32(const void *p)
58357 {
58358- return be32_to_cpup((__be32 *)p);
58359+ return be32_to_cpup((const __be32 *)p);
58360 }
58361
58362 static inline u64 get_unaligned_be64(const void *p)
58363 {
58364- return be64_to_cpup((__be64 *)p);
58365+ return be64_to_cpup((const __be64 *)p);
58366 }
58367
58368 static inline void put_unaligned_le16(u16 val, void *p)
58369diff -urNp linux-2.6.32.42/include/linux/vmalloc.h linux-2.6.32.42/include/linux/vmalloc.h
58370--- linux-2.6.32.42/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
58371+++ linux-2.6.32.42/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
58372@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58373 #define VM_MAP 0x00000004 /* vmap()ed pages */
58374 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58375 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58376+
58377+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58378+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58379+#endif
58380+
58381 /* bits [20..32] reserved for arch specific ioremap internals */
58382
58383 /*
58384@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
58385
58386 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
58387
58388+#define vmalloc(x) \
58389+({ \
58390+ void *___retval; \
58391+ intoverflow_t ___x = (intoverflow_t)x; \
58392+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58393+ ___retval = NULL; \
58394+ else \
58395+ ___retval = vmalloc((unsigned long)___x); \
58396+ ___retval; \
58397+})
58398+
58399+#define __vmalloc(x, y, z) \
58400+({ \
58401+ void *___retval; \
58402+ intoverflow_t ___x = (intoverflow_t)x; \
58403+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58404+ ___retval = NULL; \
58405+ else \
58406+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58407+ ___retval; \
58408+})
58409+
58410+#define vmalloc_user(x) \
58411+({ \
58412+ void *___retval; \
58413+ intoverflow_t ___x = (intoverflow_t)x; \
58414+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58415+ ___retval = NULL; \
58416+ else \
58417+ ___retval = vmalloc_user((unsigned long)___x); \
58418+ ___retval; \
58419+})
58420+
58421+#define vmalloc_exec(x) \
58422+({ \
58423+ void *___retval; \
58424+ intoverflow_t ___x = (intoverflow_t)x; \
58425+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58426+ ___retval = NULL; \
58427+ else \
58428+ ___retval = vmalloc_exec((unsigned long)___x); \
58429+ ___retval; \
58430+})
58431+
58432+#define vmalloc_node(x, y) \
58433+({ \
58434+ void *___retval; \
58435+ intoverflow_t ___x = (intoverflow_t)x; \
58436+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58437+ ___retval = NULL; \
58438+ else \
58439+ ___retval = vmalloc_node((unsigned long)___x, (y));\
58440+ ___retval; \
58441+})
58442+
58443+#define vmalloc_32(x) \
58444+({ \
58445+ void *___retval; \
58446+ intoverflow_t ___x = (intoverflow_t)x; \
58447+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58448+ ___retval = NULL; \
58449+ else \
58450+ ___retval = vmalloc_32((unsigned long)___x); \
58451+ ___retval; \
58452+})
58453+
58454+#define vmalloc_32_user(x) \
58455+({ \
58456+ void *___retval; \
58457+ intoverflow_t ___x = (intoverflow_t)x; \
58458+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58459+ ___retval = NULL; \
58460+ else \
58461+ ___retval = vmalloc_32_user((unsigned long)___x);\
58462+ ___retval; \
58463+})
58464+
58465 #endif /* _LINUX_VMALLOC_H */
58466diff -urNp linux-2.6.32.42/include/linux/vmstat.h linux-2.6.32.42/include/linux/vmstat.h
58467--- linux-2.6.32.42/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
58468+++ linux-2.6.32.42/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
58469@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
58470 /*
58471 * Zone based page accounting with per cpu differentials.
58472 */
58473-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58474+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58475
58476 static inline void zone_page_state_add(long x, struct zone *zone,
58477 enum zone_stat_item item)
58478 {
58479- atomic_long_add(x, &zone->vm_stat[item]);
58480- atomic_long_add(x, &vm_stat[item]);
58481+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58482+ atomic_long_add_unchecked(x, &vm_stat[item]);
58483 }
58484
58485 static inline unsigned long global_page_state(enum zone_stat_item item)
58486 {
58487- long x = atomic_long_read(&vm_stat[item]);
58488+ long x = atomic_long_read_unchecked(&vm_stat[item]);
58489 #ifdef CONFIG_SMP
58490 if (x < 0)
58491 x = 0;
58492@@ -158,7 +158,7 @@ static inline unsigned long global_page_
58493 static inline unsigned long zone_page_state(struct zone *zone,
58494 enum zone_stat_item item)
58495 {
58496- long x = atomic_long_read(&zone->vm_stat[item]);
58497+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58498 #ifdef CONFIG_SMP
58499 if (x < 0)
58500 x = 0;
58501@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
58502 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58503 enum zone_stat_item item)
58504 {
58505- long x = atomic_long_read(&zone->vm_stat[item]);
58506+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58507
58508 #ifdef CONFIG_SMP
58509 int cpu;
58510@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
58511
58512 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58513 {
58514- atomic_long_inc(&zone->vm_stat[item]);
58515- atomic_long_inc(&vm_stat[item]);
58516+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
58517+ atomic_long_inc_unchecked(&vm_stat[item]);
58518 }
58519
58520 static inline void __inc_zone_page_state(struct page *page,
58521@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
58522
58523 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58524 {
58525- atomic_long_dec(&zone->vm_stat[item]);
58526- atomic_long_dec(&vm_stat[item]);
58527+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
58528+ atomic_long_dec_unchecked(&vm_stat[item]);
58529 }
58530
58531 static inline void __dec_zone_page_state(struct page *page,
58532diff -urNp linux-2.6.32.42/include/media/v4l2-device.h linux-2.6.32.42/include/media/v4l2-device.h
58533--- linux-2.6.32.42/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
58534+++ linux-2.6.32.42/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
58535@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
58536 this function returns 0. If the name ends with a digit (e.g. cx18),
58537 then the name will be set to cx18-0 since cx180 looks really odd. */
58538 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
58539- atomic_t *instance);
58540+ atomic_unchecked_t *instance);
58541
58542 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
58543 Since the parent disappears this ensures that v4l2_dev doesn't have an
58544diff -urNp linux-2.6.32.42/include/net/flow.h linux-2.6.32.42/include/net/flow.h
58545--- linux-2.6.32.42/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
58546+++ linux-2.6.32.42/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
58547@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
58548 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
58549 u8 dir, flow_resolve_t resolver);
58550 extern void flow_cache_flush(void);
58551-extern atomic_t flow_cache_genid;
58552+extern atomic_unchecked_t flow_cache_genid;
58553
58554 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
58555 {
58556diff -urNp linux-2.6.32.42/include/net/inetpeer.h linux-2.6.32.42/include/net/inetpeer.h
58557--- linux-2.6.32.42/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
58558+++ linux-2.6.32.42/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
58559@@ -24,7 +24,7 @@ struct inet_peer
58560 __u32 dtime; /* the time of last use of not
58561 * referenced entries */
58562 atomic_t refcnt;
58563- atomic_t rid; /* Frag reception counter */
58564+ atomic_unchecked_t rid; /* Frag reception counter */
58565 __u32 tcp_ts;
58566 unsigned long tcp_ts_stamp;
58567 };
58568diff -urNp linux-2.6.32.42/include/net/ip_vs.h linux-2.6.32.42/include/net/ip_vs.h
58569--- linux-2.6.32.42/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
58570+++ linux-2.6.32.42/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
58571@@ -365,7 +365,7 @@ struct ip_vs_conn {
58572 struct ip_vs_conn *control; /* Master control connection */
58573 atomic_t n_control; /* Number of controlled ones */
58574 struct ip_vs_dest *dest; /* real server */
58575- atomic_t in_pkts; /* incoming packet counter */
58576+ atomic_unchecked_t in_pkts; /* incoming packet counter */
58577
58578 /* packet transmitter for different forwarding methods. If it
58579 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58580@@ -466,7 +466,7 @@ struct ip_vs_dest {
58581 union nf_inet_addr addr; /* IP address of the server */
58582 __be16 port; /* port number of the server */
58583 volatile unsigned flags; /* dest status flags */
58584- atomic_t conn_flags; /* flags to copy to conn */
58585+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
58586 atomic_t weight; /* server weight */
58587
58588 atomic_t refcnt; /* reference counter */
58589diff -urNp linux-2.6.32.42/include/net/irda/ircomm_tty.h linux-2.6.32.42/include/net/irda/ircomm_tty.h
58590--- linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
58591+++ linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
58592@@ -35,6 +35,7 @@
58593 #include <linux/termios.h>
58594 #include <linux/timer.h>
58595 #include <linux/tty.h> /* struct tty_struct */
58596+#include <asm/local.h>
58597
58598 #include <net/irda/irias_object.h>
58599 #include <net/irda/ircomm_core.h>
58600@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58601 unsigned short close_delay;
58602 unsigned short closing_wait; /* time to wait before closing */
58603
58604- int open_count;
58605- int blocked_open; /* # of blocked opens */
58606+ local_t open_count;
58607+ local_t blocked_open; /* # of blocked opens */
58608
58609 /* Protect concurent access to :
58610 * o self->open_count
58611diff -urNp linux-2.6.32.42/include/net/iucv/af_iucv.h linux-2.6.32.42/include/net/iucv/af_iucv.h
58612--- linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
58613+++ linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
58614@@ -87,7 +87,7 @@ struct iucv_sock {
58615 struct iucv_sock_list {
58616 struct hlist_head head;
58617 rwlock_t lock;
58618- atomic_t autobind_name;
58619+ atomic_unchecked_t autobind_name;
58620 };
58621
58622 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58623diff -urNp linux-2.6.32.42/include/net/neighbour.h linux-2.6.32.42/include/net/neighbour.h
58624--- linux-2.6.32.42/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
58625+++ linux-2.6.32.42/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
58626@@ -125,12 +125,12 @@ struct neighbour
58627 struct neigh_ops
58628 {
58629 int family;
58630- void (*solicit)(struct neighbour *, struct sk_buff*);
58631- void (*error_report)(struct neighbour *, struct sk_buff*);
58632- int (*output)(struct sk_buff*);
58633- int (*connected_output)(struct sk_buff*);
58634- int (*hh_output)(struct sk_buff*);
58635- int (*queue_xmit)(struct sk_buff*);
58636+ void (* const solicit)(struct neighbour *, struct sk_buff*);
58637+ void (* const error_report)(struct neighbour *, struct sk_buff*);
58638+ int (* const output)(struct sk_buff*);
58639+ int (* const connected_output)(struct sk_buff*);
58640+ int (* const hh_output)(struct sk_buff*);
58641+ int (* const queue_xmit)(struct sk_buff*);
58642 };
58643
58644 struct pneigh_entry
58645diff -urNp linux-2.6.32.42/include/net/netlink.h linux-2.6.32.42/include/net/netlink.h
58646--- linux-2.6.32.42/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
58647+++ linux-2.6.32.42/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
58648@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
58649 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58650 {
58651 if (mark)
58652- skb_trim(skb, (unsigned char *) mark - skb->data);
58653+ skb_trim(skb, (const unsigned char *) mark - skb->data);
58654 }
58655
58656 /**
58657diff -urNp linux-2.6.32.42/include/net/netns/ipv4.h linux-2.6.32.42/include/net/netns/ipv4.h
58658--- linux-2.6.32.42/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
58659+++ linux-2.6.32.42/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
58660@@ -54,7 +54,7 @@ struct netns_ipv4 {
58661 int current_rt_cache_rebuild_count;
58662
58663 struct timer_list rt_secret_timer;
58664- atomic_t rt_genid;
58665+ atomic_unchecked_t rt_genid;
58666
58667 #ifdef CONFIG_IP_MROUTE
58668 struct sock *mroute_sk;
58669diff -urNp linux-2.6.32.42/include/net/sctp/sctp.h linux-2.6.32.42/include/net/sctp/sctp.h
58670--- linux-2.6.32.42/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
58671+++ linux-2.6.32.42/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
58672@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
58673
58674 #else /* SCTP_DEBUG */
58675
58676-#define SCTP_DEBUG_PRINTK(whatever...)
58677-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58678+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58679+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58680 #define SCTP_ENABLE_DEBUG
58681 #define SCTP_DISABLE_DEBUG
58682 #define SCTP_ASSERT(expr, str, func)
58683diff -urNp linux-2.6.32.42/include/net/sock.h linux-2.6.32.42/include/net/sock.h
58684--- linux-2.6.32.42/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
58685+++ linux-2.6.32.42/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
58686@@ -272,7 +272,7 @@ struct sock {
58687 rwlock_t sk_callback_lock;
58688 int sk_err,
58689 sk_err_soft;
58690- atomic_t sk_drops;
58691+ atomic_unchecked_t sk_drops;
58692 unsigned short sk_ack_backlog;
58693 unsigned short sk_max_ack_backlog;
58694 __u32 sk_priority;
58695diff -urNp linux-2.6.32.42/include/net/tcp.h linux-2.6.32.42/include/net/tcp.h
58696--- linux-2.6.32.42/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58697+++ linux-2.6.32.42/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58698@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58699 struct tcp_seq_afinfo {
58700 char *name;
58701 sa_family_t family;
58702+ /* cannot be const */
58703 struct file_operations seq_fops;
58704 struct seq_operations seq_ops;
58705 };
58706diff -urNp linux-2.6.32.42/include/net/udp.h linux-2.6.32.42/include/net/udp.h
58707--- linux-2.6.32.42/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58708+++ linux-2.6.32.42/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58709@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58710 char *name;
58711 sa_family_t family;
58712 struct udp_table *udp_table;
58713+ /* cannot be const */
58714 struct file_operations seq_fops;
58715 struct seq_operations seq_ops;
58716 };
58717diff -urNp linux-2.6.32.42/include/scsi/scsi_device.h linux-2.6.32.42/include/scsi/scsi_device.h
58718--- linux-2.6.32.42/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58719+++ linux-2.6.32.42/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58720@@ -156,9 +156,9 @@ struct scsi_device {
58721 unsigned int max_device_blocked; /* what device_blocked counts down from */
58722 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58723
58724- atomic_t iorequest_cnt;
58725- atomic_t iodone_cnt;
58726- atomic_t ioerr_cnt;
58727+ atomic_unchecked_t iorequest_cnt;
58728+ atomic_unchecked_t iodone_cnt;
58729+ atomic_unchecked_t ioerr_cnt;
58730
58731 struct device sdev_gendev,
58732 sdev_dev;
58733diff -urNp linux-2.6.32.42/include/sound/ac97_codec.h linux-2.6.32.42/include/sound/ac97_codec.h
58734--- linux-2.6.32.42/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58735+++ linux-2.6.32.42/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58736@@ -419,15 +419,15 @@
58737 struct snd_ac97;
58738
58739 struct snd_ac97_build_ops {
58740- int (*build_3d) (struct snd_ac97 *ac97);
58741- int (*build_specific) (struct snd_ac97 *ac97);
58742- int (*build_spdif) (struct snd_ac97 *ac97);
58743- int (*build_post_spdif) (struct snd_ac97 *ac97);
58744+ int (* const build_3d) (struct snd_ac97 *ac97);
58745+ int (* const build_specific) (struct snd_ac97 *ac97);
58746+ int (* const build_spdif) (struct snd_ac97 *ac97);
58747+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
58748 #ifdef CONFIG_PM
58749- void (*suspend) (struct snd_ac97 *ac97);
58750- void (*resume) (struct snd_ac97 *ac97);
58751+ void (* const suspend) (struct snd_ac97 *ac97);
58752+ void (* const resume) (struct snd_ac97 *ac97);
58753 #endif
58754- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58755+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58756 };
58757
58758 struct snd_ac97_bus_ops {
58759@@ -477,7 +477,7 @@ struct snd_ac97_template {
58760
58761 struct snd_ac97 {
58762 /* -- lowlevel (hardware) driver specific -- */
58763- struct snd_ac97_build_ops * build_ops;
58764+ const struct snd_ac97_build_ops * build_ops;
58765 void *private_data;
58766 void (*private_free) (struct snd_ac97 *ac97);
58767 /* --- */
58768diff -urNp linux-2.6.32.42/include/sound/ymfpci.h linux-2.6.32.42/include/sound/ymfpci.h
58769--- linux-2.6.32.42/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58770+++ linux-2.6.32.42/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58771@@ -358,7 +358,7 @@ struct snd_ymfpci {
58772 spinlock_t reg_lock;
58773 spinlock_t voice_lock;
58774 wait_queue_head_t interrupt_sleep;
58775- atomic_t interrupt_sleep_count;
58776+ atomic_unchecked_t interrupt_sleep_count;
58777 struct snd_info_entry *proc_entry;
58778 const struct firmware *dsp_microcode;
58779 const struct firmware *controller_microcode;
58780diff -urNp linux-2.6.32.42/include/trace/events/irq.h linux-2.6.32.42/include/trace/events/irq.h
58781--- linux-2.6.32.42/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58782+++ linux-2.6.32.42/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58783@@ -34,7 +34,7 @@
58784 */
58785 TRACE_EVENT(irq_handler_entry,
58786
58787- TP_PROTO(int irq, struct irqaction *action),
58788+ TP_PROTO(int irq, const struct irqaction *action),
58789
58790 TP_ARGS(irq, action),
58791
58792@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58793 */
58794 TRACE_EVENT(irq_handler_exit,
58795
58796- TP_PROTO(int irq, struct irqaction *action, int ret),
58797+ TP_PROTO(int irq, const struct irqaction *action, int ret),
58798
58799 TP_ARGS(irq, action, ret),
58800
58801@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58802 */
58803 TRACE_EVENT(softirq_entry,
58804
58805- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58806+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58807
58808 TP_ARGS(h, vec),
58809
58810@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58811 */
58812 TRACE_EVENT(softirq_exit,
58813
58814- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58815+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58816
58817 TP_ARGS(h, vec),
58818
58819diff -urNp linux-2.6.32.42/include/video/uvesafb.h linux-2.6.32.42/include/video/uvesafb.h
58820--- linux-2.6.32.42/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58821+++ linux-2.6.32.42/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58822@@ -177,6 +177,7 @@ struct uvesafb_par {
58823 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58824 u8 pmi_setpal; /* PMI for palette changes */
58825 u16 *pmi_base; /* protected mode interface location */
58826+ u8 *pmi_code; /* protected mode code location */
58827 void *pmi_start;
58828 void *pmi_pal;
58829 u8 *vbe_state_orig; /*
58830diff -urNp linux-2.6.32.42/init/do_mounts.c linux-2.6.32.42/init/do_mounts.c
58831--- linux-2.6.32.42/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58832+++ linux-2.6.32.42/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58833@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58834
58835 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58836 {
58837- int err = sys_mount(name, "/root", fs, flags, data);
58838+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58839 if (err)
58840 return err;
58841
58842- sys_chdir("/root");
58843+ sys_chdir((__force const char __user *)"/root");
58844 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58845 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58846 current->fs->pwd.mnt->mnt_sb->s_type->name,
58847@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58848 va_start(args, fmt);
58849 vsprintf(buf, fmt, args);
58850 va_end(args);
58851- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58852+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58853 if (fd >= 0) {
58854 sys_ioctl(fd, FDEJECT, 0);
58855 sys_close(fd);
58856 }
58857 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58858- fd = sys_open("/dev/console", O_RDWR, 0);
58859+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58860 if (fd >= 0) {
58861 sys_ioctl(fd, TCGETS, (long)&termios);
58862 termios.c_lflag &= ~ICANON;
58863 sys_ioctl(fd, TCSETSF, (long)&termios);
58864- sys_read(fd, &c, 1);
58865+ sys_read(fd, (char __user *)&c, 1);
58866 termios.c_lflag |= ICANON;
58867 sys_ioctl(fd, TCSETSF, (long)&termios);
58868 sys_close(fd);
58869@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58870 mount_root();
58871 out:
58872 devtmpfs_mount("dev");
58873- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58874- sys_chroot(".");
58875+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58876+ sys_chroot((__force char __user *)".");
58877 }
58878diff -urNp linux-2.6.32.42/init/do_mounts.h linux-2.6.32.42/init/do_mounts.h
58879--- linux-2.6.32.42/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58880+++ linux-2.6.32.42/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58881@@ -15,15 +15,15 @@ extern int root_mountflags;
58882
58883 static inline int create_dev(char *name, dev_t dev)
58884 {
58885- sys_unlink(name);
58886- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58887+ sys_unlink((__force char __user *)name);
58888+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58889 }
58890
58891 #if BITS_PER_LONG == 32
58892 static inline u32 bstat(char *name)
58893 {
58894 struct stat64 stat;
58895- if (sys_stat64(name, &stat) != 0)
58896+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58897 return 0;
58898 if (!S_ISBLK(stat.st_mode))
58899 return 0;
58900diff -urNp linux-2.6.32.42/init/do_mounts_initrd.c linux-2.6.32.42/init/do_mounts_initrd.c
58901--- linux-2.6.32.42/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58902+++ linux-2.6.32.42/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58903@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58904 sys_close(old_fd);sys_close(root_fd);
58905 sys_close(0);sys_close(1);sys_close(2);
58906 sys_setsid();
58907- (void) sys_open("/dev/console",O_RDWR,0);
58908+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58909 (void) sys_dup(0);
58910 (void) sys_dup(0);
58911 return kernel_execve(shell, argv, envp_init);
58912@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58913 create_dev("/dev/root.old", Root_RAM0);
58914 /* mount initrd on rootfs' /root */
58915 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58916- sys_mkdir("/old", 0700);
58917- root_fd = sys_open("/", 0, 0);
58918- old_fd = sys_open("/old", 0, 0);
58919+ sys_mkdir((__force const char __user *)"/old", 0700);
58920+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
58921+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58922 /* move initrd over / and chdir/chroot in initrd root */
58923- sys_chdir("/root");
58924- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58925- sys_chroot(".");
58926+ sys_chdir((__force const char __user *)"/root");
58927+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58928+ sys_chroot((__force const char __user *)".");
58929
58930 /*
58931 * In case that a resume from disk is carried out by linuxrc or one of
58932@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58933
58934 /* move initrd to rootfs' /old */
58935 sys_fchdir(old_fd);
58936- sys_mount("/", ".", NULL, MS_MOVE, NULL);
58937+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58938 /* switch root and cwd back to / of rootfs */
58939 sys_fchdir(root_fd);
58940- sys_chroot(".");
58941+ sys_chroot((__force const char __user *)".");
58942 sys_close(old_fd);
58943 sys_close(root_fd);
58944
58945 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58946- sys_chdir("/old");
58947+ sys_chdir((__force const char __user *)"/old");
58948 return;
58949 }
58950
58951@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58952 mount_root();
58953
58954 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58955- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58956+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58957 if (!error)
58958 printk("okay\n");
58959 else {
58960- int fd = sys_open("/dev/root.old", O_RDWR, 0);
58961+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58962 if (error == -ENOENT)
58963 printk("/initrd does not exist. Ignored.\n");
58964 else
58965 printk("failed\n");
58966 printk(KERN_NOTICE "Unmounting old root\n");
58967- sys_umount("/old", MNT_DETACH);
58968+ sys_umount((__force char __user *)"/old", MNT_DETACH);
58969 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58970 if (fd < 0) {
58971 error = fd;
58972@@ -119,11 +119,11 @@ int __init initrd_load(void)
58973 * mounted in the normal path.
58974 */
58975 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58976- sys_unlink("/initrd.image");
58977+ sys_unlink((__force const char __user *)"/initrd.image");
58978 handle_initrd();
58979 return 1;
58980 }
58981 }
58982- sys_unlink("/initrd.image");
58983+ sys_unlink((__force const char __user *)"/initrd.image");
58984 return 0;
58985 }
58986diff -urNp linux-2.6.32.42/init/do_mounts_md.c linux-2.6.32.42/init/do_mounts_md.c
58987--- linux-2.6.32.42/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58988+++ linux-2.6.32.42/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58989@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58990 partitioned ? "_d" : "", minor,
58991 md_setup_args[ent].device_names);
58992
58993- fd = sys_open(name, 0, 0);
58994+ fd = sys_open((__force char __user *)name, 0, 0);
58995 if (fd < 0) {
58996 printk(KERN_ERR "md: open failed - cannot start "
58997 "array %s\n", name);
58998@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58999 * array without it
59000 */
59001 sys_close(fd);
59002- fd = sys_open(name, 0, 0);
59003+ fd = sys_open((__force char __user *)name, 0, 0);
59004 sys_ioctl(fd, BLKRRPART, 0);
59005 }
59006 sys_close(fd);
59007@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
59008
59009 wait_for_device_probe();
59010
59011- fd = sys_open("/dev/md0", 0, 0);
59012+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
59013 if (fd >= 0) {
59014 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
59015 sys_close(fd);
59016diff -urNp linux-2.6.32.42/init/initramfs.c linux-2.6.32.42/init/initramfs.c
59017--- linux-2.6.32.42/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
59018+++ linux-2.6.32.42/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
59019@@ -74,7 +74,7 @@ static void __init free_hash(void)
59020 }
59021 }
59022
59023-static long __init do_utime(char __user *filename, time_t mtime)
59024+static long __init do_utime(__force char __user *filename, time_t mtime)
59025 {
59026 struct timespec t[2];
59027
59028@@ -109,7 +109,7 @@ static void __init dir_utime(void)
59029 struct dir_entry *de, *tmp;
59030 list_for_each_entry_safe(de, tmp, &dir_list, list) {
59031 list_del(&de->list);
59032- do_utime(de->name, de->mtime);
59033+ do_utime((__force char __user *)de->name, de->mtime);
59034 kfree(de->name);
59035 kfree(de);
59036 }
59037@@ -271,7 +271,7 @@ static int __init maybe_link(void)
59038 if (nlink >= 2) {
59039 char *old = find_link(major, minor, ino, mode, collected);
59040 if (old)
59041- return (sys_link(old, collected) < 0) ? -1 : 1;
59042+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
59043 }
59044 return 0;
59045 }
59046@@ -280,11 +280,11 @@ static void __init clean_path(char *path
59047 {
59048 struct stat st;
59049
59050- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
59051+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
59052 if (S_ISDIR(st.st_mode))
59053- sys_rmdir(path);
59054+ sys_rmdir((__force char __user *)path);
59055 else
59056- sys_unlink(path);
59057+ sys_unlink((__force char __user *)path);
59058 }
59059 }
59060
59061@@ -305,7 +305,7 @@ static int __init do_name(void)
59062 int openflags = O_WRONLY|O_CREAT;
59063 if (ml != 1)
59064 openflags |= O_TRUNC;
59065- wfd = sys_open(collected, openflags, mode);
59066+ wfd = sys_open((__force char __user *)collected, openflags, mode);
59067
59068 if (wfd >= 0) {
59069 sys_fchown(wfd, uid, gid);
59070@@ -317,17 +317,17 @@ static int __init do_name(void)
59071 }
59072 }
59073 } else if (S_ISDIR(mode)) {
59074- sys_mkdir(collected, mode);
59075- sys_chown(collected, uid, gid);
59076- sys_chmod(collected, mode);
59077+ sys_mkdir((__force char __user *)collected, mode);
59078+ sys_chown((__force char __user *)collected, uid, gid);
59079+ sys_chmod((__force char __user *)collected, mode);
59080 dir_add(collected, mtime);
59081 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
59082 S_ISFIFO(mode) || S_ISSOCK(mode)) {
59083 if (maybe_link() == 0) {
59084- sys_mknod(collected, mode, rdev);
59085- sys_chown(collected, uid, gid);
59086- sys_chmod(collected, mode);
59087- do_utime(collected, mtime);
59088+ sys_mknod((__force char __user *)collected, mode, rdev);
59089+ sys_chown((__force char __user *)collected, uid, gid);
59090+ sys_chmod((__force char __user *)collected, mode);
59091+ do_utime((__force char __user *)collected, mtime);
59092 }
59093 }
59094 return 0;
59095@@ -336,15 +336,15 @@ static int __init do_name(void)
59096 static int __init do_copy(void)
59097 {
59098 if (count >= body_len) {
59099- sys_write(wfd, victim, body_len);
59100+ sys_write(wfd, (__force char __user *)victim, body_len);
59101 sys_close(wfd);
59102- do_utime(vcollected, mtime);
59103+ do_utime((__force char __user *)vcollected, mtime);
59104 kfree(vcollected);
59105 eat(body_len);
59106 state = SkipIt;
59107 return 0;
59108 } else {
59109- sys_write(wfd, victim, count);
59110+ sys_write(wfd, (__force char __user *)victim, count);
59111 body_len -= count;
59112 eat(count);
59113 return 1;
59114@@ -355,9 +355,9 @@ static int __init do_symlink(void)
59115 {
59116 collected[N_ALIGN(name_len) + body_len] = '\0';
59117 clean_path(collected, 0);
59118- sys_symlink(collected + N_ALIGN(name_len), collected);
59119- sys_lchown(collected, uid, gid);
59120- do_utime(collected, mtime);
59121+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
59122+ sys_lchown((__force char __user *)collected, uid, gid);
59123+ do_utime((__force char __user *)collected, mtime);
59124 state = SkipIt;
59125 next_state = Reset;
59126 return 0;
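The init/do_mounts*, do_mounts_md and initramfs hunks above add "__force ... __user" casts so that sparse, the kernel's static checker, accepts kernel-space string literals being handed to syscall entry points declared to take user pointers; outside a sparse run the annotations expand to nothing, so no generated code changes. A compile-only sketch of how such annotations behave; the macro definitions follow the spirit of the kernel's compiler.h, and the function here is hypothetical:

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* Hypothetical syscall-style prototype taking a user-space pointer. */
long demo_sys_open(const char __user *filename, int flags, int mode);

long demo_sys_open(const char __user *filename, int flags, int mode)
{
        (void)filename; (void)flags; (void)mode;
        return -1;
}

int main(void)
{
        /* Without the __force cast, sparse warns about mixing address spaces;
         * with it, the deliberate kernel-to-user pointer crossing is documented
         * and silenced. Plain compilers just see an ordinary cast. */
        return (int)demo_sys_open((__force const char __user *)"/dev/console", 0, 0);
}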
59127diff -urNp linux-2.6.32.42/init/Kconfig linux-2.6.32.42/init/Kconfig
59128--- linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
59129+++ linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
59130@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
59131
59132 config COMPAT_BRK
59133 bool "Disable heap randomization"
59134- default y
59135+ default n
59136 help
59137 Randomizing heap placement makes heap exploits harder, but it
59138 also breaks ancient binaries (including anything libc5 based).
59139diff -urNp linux-2.6.32.42/init/main.c linux-2.6.32.42/init/main.c
59140--- linux-2.6.32.42/init/main.c 2011-05-10 22:12:01.000000000 -0400
59141+++ linux-2.6.32.42/init/main.c 2011-05-22 23:02:06.000000000 -0400
59142@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
59143 #ifdef CONFIG_TC
59144 extern void tc_init(void);
59145 #endif
59146+extern void grsecurity_init(void);
59147
59148 enum system_states system_state __read_mostly;
59149 EXPORT_SYMBOL(system_state);
59150@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
59151
59152 __setup("reset_devices", set_reset_devices);
59153
59154+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59155+extern char pax_enter_kernel_user[];
59156+extern char pax_exit_kernel_user[];
59157+extern pgdval_t clone_pgd_mask;
59158+#endif
59159+
59160+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59161+static int __init setup_pax_nouderef(char *str)
59162+{
59163+#ifdef CONFIG_X86_32
59164+ unsigned int cpu;
59165+ struct desc_struct *gdt;
59166+
59167+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
59168+ gdt = get_cpu_gdt_table(cpu);
59169+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59170+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59171+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59172+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59173+ }
59174+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59175+#else
59176+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59177+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59178+ clone_pgd_mask = ~(pgdval_t)0UL;
59179+#endif
59180+
59181+ return 0;
59182+}
59183+early_param("pax_nouderef", setup_pax_nouderef);
59184+#endif
59185+
59186+#ifdef CONFIG_PAX_SOFTMODE
59187+unsigned int pax_softmode;
59188+
59189+static int __init setup_pax_softmode(char *str)
59190+{
59191+ get_option(&str, &pax_softmode);
59192+ return 1;
59193+}
59194+__setup("pax_softmode=", setup_pax_softmode);
59195+#endif
59196+
59197 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59198 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59199 static const char *panic_later, *panic_param;
59200@@ -705,52 +749,53 @@ int initcall_debug;
59201 core_param(initcall_debug, initcall_debug, bool, 0644);
59202
59203 static char msgbuf[64];
59204-static struct boot_trace_call call;
59205-static struct boot_trace_ret ret;
59206+static struct boot_trace_call trace_call;
59207+static struct boot_trace_ret trace_ret;
59208
59209 int do_one_initcall(initcall_t fn)
59210 {
59211 int count = preempt_count();
59212 ktime_t calltime, delta, rettime;
59213+ const char *msg1 = "", *msg2 = "";
59214
59215 if (initcall_debug) {
59216- call.caller = task_pid_nr(current);
59217- printk("calling %pF @ %i\n", fn, call.caller);
59218+ trace_call.caller = task_pid_nr(current);
59219+ printk("calling %pF @ %i\n", fn, trace_call.caller);
59220 calltime = ktime_get();
59221- trace_boot_call(&call, fn);
59222+ trace_boot_call(&trace_call, fn);
59223 enable_boot_trace();
59224 }
59225
59226- ret.result = fn();
59227+ trace_ret.result = fn();
59228
59229 if (initcall_debug) {
59230 disable_boot_trace();
59231 rettime = ktime_get();
59232 delta = ktime_sub(rettime, calltime);
59233- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59234- trace_boot_ret(&ret, fn);
59235+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59236+ trace_boot_ret(&trace_ret, fn);
59237 printk("initcall %pF returned %d after %Ld usecs\n", fn,
59238- ret.result, ret.duration);
59239+ trace_ret.result, trace_ret.duration);
59240 }
59241
59242 msgbuf[0] = 0;
59243
59244- if (ret.result && ret.result != -ENODEV && initcall_debug)
59245- sprintf(msgbuf, "error code %d ", ret.result);
59246+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
59247+ sprintf(msgbuf, "error code %d ", trace_ret.result);
59248
59249 if (preempt_count() != count) {
59250- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59251+ msg1 = " preemption imbalance";
59252 preempt_count() = count;
59253 }
59254 if (irqs_disabled()) {
59255- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59256+ msg2 = " disabled interrupts";
59257 local_irq_enable();
59258 }
59259- if (msgbuf[0]) {
59260- printk("initcall %pF returned with %s\n", fn, msgbuf);
59261+ if (msgbuf[0] || *msg1 || *msg2) {
59262+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59263 }
59264
59265- return ret.result;
59266+ return trace_ret.result;
59267 }
59268
59269
59270@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
59271 if (!ramdisk_execute_command)
59272 ramdisk_execute_command = "/init";
59273
59274- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59275+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59276 ramdisk_execute_command = NULL;
59277 prepare_namespace();
59278 }
59279
59280+ grsecurity_init();
59281+
59282 /*
59283 * Ok, we have completed the initial bootup, and
59284 * we're essentially up and running. Get rid of the
59285diff -urNp linux-2.6.32.42/init/noinitramfs.c linux-2.6.32.42/init/noinitramfs.c
59286--- linux-2.6.32.42/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
59287+++ linux-2.6.32.42/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
59288@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
59289 {
59290 int err;
59291
59292- err = sys_mkdir("/dev", 0755);
59293+ err = sys_mkdir((const char __user *)"/dev", 0755);
59294 if (err < 0)
59295 goto out;
59296
59297@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
59298 if (err < 0)
59299 goto out;
59300
59301- err = sys_mkdir("/root", 0700);
59302+ err = sys_mkdir((const char __user *)"/root", 0700);
59303 if (err < 0)
59304 goto out;
59305
59306diff -urNp linux-2.6.32.42/ipc/mqueue.c linux-2.6.32.42/ipc/mqueue.c
59307--- linux-2.6.32.42/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
59308+++ linux-2.6.32.42/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
59309@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
59310 mq_bytes = (mq_msg_tblsz +
59311 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59312
59313+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59314 spin_lock(&mq_lock);
59315 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59316 u->mq_bytes + mq_bytes >
59317diff -urNp linux-2.6.32.42/ipc/sem.c linux-2.6.32.42/ipc/sem.c
59318--- linux-2.6.32.42/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
59319+++ linux-2.6.32.42/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
59320@@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
59321 ushort* sem_io = fast_sem_io;
59322 int nsems;
59323
59324+ pax_track_stack();
59325+
59326 sma = sem_lock_check(ns, semid);
59327 if (IS_ERR(sma))
59328 return PTR_ERR(sma);
59329@@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59330 unsigned long jiffies_left = 0;
59331 struct ipc_namespace *ns;
59332
59333+ pax_track_stack();
59334+
59335 ns = current->nsproxy->ipc_ns;
59336
59337 if (nsops < 1 || semid < 0)
59338diff -urNp linux-2.6.32.42/ipc/shm.c linux-2.6.32.42/ipc/shm.c
59339--- linux-2.6.32.42/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
59340+++ linux-2.6.32.42/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
59341@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
59342 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59343 #endif
59344
59345+#ifdef CONFIG_GRKERNSEC
59346+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59347+ const time_t shm_createtime, const uid_t cuid,
59348+ const int shmid);
59349+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59350+ const time_t shm_createtime);
59351+#endif
59352+
59353 void shm_init_ns(struct ipc_namespace *ns)
59354 {
59355 ns->shm_ctlmax = SHMMAX;
59356@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
59357 shp->shm_lprid = 0;
59358 shp->shm_atim = shp->shm_dtim = 0;
59359 shp->shm_ctim = get_seconds();
59360+#ifdef CONFIG_GRKERNSEC
59361+ {
59362+ struct timespec timeval;
59363+ do_posix_clock_monotonic_gettime(&timeval);
59364+
59365+ shp->shm_createtime = timeval.tv_sec;
59366+ }
59367+#endif
59368 shp->shm_segsz = size;
59369 shp->shm_nattch = 0;
59370 shp->shm_file = file;
59371@@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
59372 if (err)
59373 goto out_unlock;
59374
59375+#ifdef CONFIG_GRKERNSEC
59376+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59377+ shp->shm_perm.cuid, shmid) ||
59378+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59379+ err = -EACCES;
59380+ goto out_unlock;
59381+ }
59382+#endif
59383+
59384 path.dentry = dget(shp->shm_file->f_path.dentry);
59385 path.mnt = shp->shm_file->f_path.mnt;
59386 shp->shm_nattch++;
59387+#ifdef CONFIG_GRKERNSEC
59388+ shp->shm_lapid = current->pid;
59389+#endif
59390 size = i_size_read(path.dentry->d_inode);
59391 shm_unlock(shp);
59392
59393diff -urNp linux-2.6.32.42/kernel/acct.c linux-2.6.32.42/kernel/acct.c
59394--- linux-2.6.32.42/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
59395+++ linux-2.6.32.42/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
59396@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
59397 */
59398 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59399 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59400- file->f_op->write(file, (char *)&ac,
59401+ file->f_op->write(file, (__force char __user *)&ac,
59402 sizeof(acct_t), &file->f_pos);
59403 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59404 set_fs(fs);
59405diff -urNp linux-2.6.32.42/kernel/audit.c linux-2.6.32.42/kernel/audit.c
59406--- linux-2.6.32.42/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
59407+++ linux-2.6.32.42/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
59408@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
59409 3) suppressed due to audit_rate_limit
59410 4) suppressed due to audit_backlog_limit
59411 */
59412-static atomic_t audit_lost = ATOMIC_INIT(0);
59413+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59414
59415 /* The netlink socket. */
59416 static struct sock *audit_sock;
59417@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
59418 unsigned long now;
59419 int print;
59420
59421- atomic_inc(&audit_lost);
59422+ atomic_inc_unchecked(&audit_lost);
59423
59424 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59425
59426@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
59427 printk(KERN_WARNING
59428 "audit: audit_lost=%d audit_rate_limit=%d "
59429 "audit_backlog_limit=%d\n",
59430- atomic_read(&audit_lost),
59431+ atomic_read_unchecked(&audit_lost),
59432 audit_rate_limit,
59433 audit_backlog_limit);
59434 audit_panic(message);
59435@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
59436 status_set.pid = audit_pid;
59437 status_set.rate_limit = audit_rate_limit;
59438 status_set.backlog_limit = audit_backlog_limit;
59439- status_set.lost = atomic_read(&audit_lost);
59440+ status_set.lost = atomic_read_unchecked(&audit_lost);
59441 status_set.backlog = skb_queue_len(&audit_skb_queue);
59442 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59443 &status_set, sizeof(status_set));
59444@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
59445 spin_unlock_irq(&tsk->sighand->siglock);
59446 }
59447 read_unlock(&tasklist_lock);
59448- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
59449- &s, sizeof(s));
59450+
59451+ if (!err)
59452+ audit_send_reply(NETLINK_CB(skb).pid, seq,
59453+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
59454 break;
59455 }
59456 case AUDIT_TTY_SET: {
59457diff -urNp linux-2.6.32.42/kernel/auditsc.c linux-2.6.32.42/kernel/auditsc.c
59458--- linux-2.6.32.42/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
59459+++ linux-2.6.32.42/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
59460@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
59461 }
59462
59463 /* global counter which is incremented every time something logs in */
59464-static atomic_t session_id = ATOMIC_INIT(0);
59465+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59466
59467 /**
59468 * audit_set_loginuid - set a task's audit_context loginuid
59469@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
59470 */
59471 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59472 {
59473- unsigned int sessionid = atomic_inc_return(&session_id);
59474+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59475 struct audit_context *context = task->audit_context;
59476
59477 if (context && context->in_syscall) {
59478diff -urNp linux-2.6.32.42/kernel/capability.c linux-2.6.32.42/kernel/capability.c
59479--- linux-2.6.32.42/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
59480+++ linux-2.6.32.42/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
59481@@ -305,10 +305,26 @@ int capable(int cap)
59482 BUG();
59483 }
59484
59485- if (security_capable(cap) == 0) {
59486+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
59487 current->flags |= PF_SUPERPRIV;
59488 return 1;
59489 }
59490 return 0;
59491 }
59492+
59493+int capable_nolog(int cap)
59494+{
59495+ if (unlikely(!cap_valid(cap))) {
59496+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59497+ BUG();
59498+ }
59499+
59500+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
59501+ current->flags |= PF_SUPERPRIV;
59502+ return 1;
59503+ }
59504+ return 0;
59505+}
59506+
59507 EXPORT_SYMBOL(capable);
59508+EXPORT_SYMBOL(capable_nolog);
59509diff -urNp linux-2.6.32.42/kernel/cgroup.c linux-2.6.32.42/kernel/cgroup.c
59510--- linux-2.6.32.42/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
59511+++ linux-2.6.32.42/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
59512@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
59513 struct hlist_head *hhead;
59514 struct cg_cgroup_link *link;
59515
59516+ pax_track_stack();
59517+
59518 /* First see if we already have a cgroup group that matches
59519 * the desired set */
59520 read_lock(&css_set_lock);
59521diff -urNp linux-2.6.32.42/kernel/configs.c linux-2.6.32.42/kernel/configs.c
59522--- linux-2.6.32.42/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
59523+++ linux-2.6.32.42/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
59524@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
59525 struct proc_dir_entry *entry;
59526
59527 /* create the current config file */
59528+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59529+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59530+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59531+ &ikconfig_file_ops);
59532+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59533+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59534+ &ikconfig_file_ops);
59535+#endif
59536+#else
59537 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59538 &ikconfig_file_ops);
59539+#endif
59540+
59541 if (!entry)
59542 return -ENOMEM;
59543
59544diff -urNp linux-2.6.32.42/kernel/cpu.c linux-2.6.32.42/kernel/cpu.c
59545--- linux-2.6.32.42/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
59546+++ linux-2.6.32.42/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
59547@@ -19,7 +19,7 @@
59548 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
59549 static DEFINE_MUTEX(cpu_add_remove_lock);
59550
59551-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
59552+static RAW_NOTIFIER_HEAD(cpu_chain);
59553
59554 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
59555 * Should always be manipulated under cpu_add_remove_lock
59556diff -urNp linux-2.6.32.42/kernel/cred.c linux-2.6.32.42/kernel/cred.c
59557--- linux-2.6.32.42/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
59558+++ linux-2.6.32.42/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
59559@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
59560 */
59561 void __put_cred(struct cred *cred)
59562 {
59563+ pax_track_stack();
59564+
59565 kdebug("__put_cred(%p{%d,%d})", cred,
59566 atomic_read(&cred->usage),
59567 read_cred_subscribers(cred));
59568@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
59569 {
59570 struct cred *cred;
59571
59572+ pax_track_stack();
59573+
59574 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59575 atomic_read(&tsk->cred->usage),
59576 read_cred_subscribers(tsk->cred));
59577@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
59578 {
59579 const struct cred *cred;
59580
59581+ pax_track_stack();
59582+
59583 rcu_read_lock();
59584
59585 do {
59586@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
59587 {
59588 struct cred *new;
59589
59590+ pax_track_stack();
59591+
59592 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59593 if (!new)
59594 return NULL;
59595@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
59596 const struct cred *old;
59597 struct cred *new;
59598
59599+ pax_track_stack();
59600+
59601 validate_process_creds();
59602
59603 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59604@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
59605 struct thread_group_cred *tgcred = NULL;
59606 struct cred *new;
59607
59608+ pax_track_stack();
59609+
59610 #ifdef CONFIG_KEYS
59611 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59612 if (!tgcred)
59613@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
59614 struct cred *new;
59615 int ret;
59616
59617+ pax_track_stack();
59618+
59619 mutex_init(&p->cred_guard_mutex);
59620
59621 if (
59622@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
59623 struct task_struct *task = current;
59624 const struct cred *old = task->real_cred;
59625
59626+ pax_track_stack();
59627+
59628 kdebug("commit_creds(%p{%d,%d})", new,
59629 atomic_read(&new->usage),
59630 read_cred_subscribers(new));
59631@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
59632
59633 get_cred(new); /* we will require a ref for the subj creds too */
59634
59635+ gr_set_role_label(task, new->uid, new->gid);
59636+
59637 /* dumpability changes */
59638 if (old->euid != new->euid ||
59639 old->egid != new->egid ||
59640@@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
59641 */
59642 void abort_creds(struct cred *new)
59643 {
59644+ pax_track_stack();
59645+
59646 kdebug("abort_creds(%p{%d,%d})", new,
59647 atomic_read(&new->usage),
59648 read_cred_subscribers(new));
59649@@ -629,6 +649,8 @@ const struct cred *override_creds(const
59650 {
59651 const struct cred *old = current->cred;
59652
59653+ pax_track_stack();
59654+
59655 kdebug("override_creds(%p{%d,%d})", new,
59656 atomic_read(&new->usage),
59657 read_cred_subscribers(new));
59658@@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
59659 {
59660 const struct cred *override = current->cred;
59661
59662+ pax_track_stack();
59663+
59664 kdebug("revert_creds(%p{%d,%d})", old,
59665 atomic_read(&old->usage),
59666 read_cred_subscribers(old));
59667@@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
59668 const struct cred *old;
59669 struct cred *new;
59670
59671+ pax_track_stack();
59672+
59673 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59674 if (!new)
59675 return NULL;
59676@@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59677 */
59678 int set_security_override(struct cred *new, u32 secid)
59679 {
59680+ pax_track_stack();
59681+
59682 return security_kernel_act_as(new, secid);
59683 }
59684 EXPORT_SYMBOL(set_security_override);
59685@@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
59686 u32 secid;
59687 int ret;
59688
59689+ pax_track_stack();
59690+
59691 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59692 if (ret < 0)
59693 return ret;
59694diff -urNp linux-2.6.32.42/kernel/exit.c linux-2.6.32.42/kernel/exit.c
59695--- linux-2.6.32.42/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59696+++ linux-2.6.32.42/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59697@@ -55,6 +55,10 @@
59698 #include <asm/pgtable.h>
59699 #include <asm/mmu_context.h>
59700
59701+#ifdef CONFIG_GRKERNSEC
59702+extern rwlock_t grsec_exec_file_lock;
59703+#endif
59704+
59705 static void exit_mm(struct task_struct * tsk);
59706
59707 static void __unhash_process(struct task_struct *p)
59708@@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59709 struct task_struct *leader;
59710 int zap_leader;
59711 repeat:
59712+ gr_del_task_from_ip_table(p);
59713+
59714 tracehook_prepare_release_task(p);
59715 /* don't need to get the RCU readlock here - the process is dead and
59716 * can't be modifying its own credentials */
59717@@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59718 {
59719 write_lock_irq(&tasklist_lock);
59720
59721+#ifdef CONFIG_GRKERNSEC
59722+ write_lock(&grsec_exec_file_lock);
59723+ if (current->exec_file) {
59724+ fput(current->exec_file);
59725+ current->exec_file = NULL;
59726+ }
59727+ write_unlock(&grsec_exec_file_lock);
59728+#endif
59729+
59730 ptrace_unlink(current);
59731 /* Reparent to init */
59732 current->real_parent = current->parent = kthreadd_task;
59733 list_move_tail(&current->sibling, &current->real_parent->children);
59734
59735+ gr_set_kernel_label(current);
59736+
59737 /* Set the exit signal to SIGCHLD so we signal init on exit */
59738 current->exit_signal = SIGCHLD;
59739
59740@@ -397,7 +414,7 @@ int allow_signal(int sig)
59741 * know it'll be handled, so that they don't get converted to
59742 * SIGKILL or just silently dropped.
59743 */
59744- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59745+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59746 recalc_sigpending();
59747 spin_unlock_irq(&current->sighand->siglock);
59748 return 0;
59749@@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59750 vsnprintf(current->comm, sizeof(current->comm), name, args);
59751 va_end(args);
59752
59753+#ifdef CONFIG_GRKERNSEC
59754+ write_lock(&grsec_exec_file_lock);
59755+ if (current->exec_file) {
59756+ fput(current->exec_file);
59757+ current->exec_file = NULL;
59758+ }
59759+ write_unlock(&grsec_exec_file_lock);
59760+#endif
59761+
59762+ gr_set_kernel_label(current);
59763+
59764 /*
59765 * If we were started as result of loading a module, close all of the
59766 * user space pages. We don't need them, and if we didn't close them
59767@@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59768 struct task_struct *tsk = current;
59769 int group_dead;
59770
59771- profile_task_exit(tsk);
59772-
59773- WARN_ON(atomic_read(&tsk->fs_excl));
59774-
59775+ /*
59776+ * Check this first since set_fs() below depends on
59777+ * current_thread_info(), which we better not access when we're in
59778+ * interrupt context. Other than that, we want to do the set_fs()
59779+ * as early as possible.
59780+ */
59781 if (unlikely(in_interrupt()))
59782 panic("Aiee, killing interrupt handler!");
59783- if (unlikely(!tsk->pid))
59784- panic("Attempted to kill the idle task!");
59785
59786 /*
59787- * If do_exit is called because this processes oopsed, it's possible
59788+ * If do_exit is called because this processes Oops'ed, it's possible
59789 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59790 * continuing. Amongst other possible reasons, this is to prevent
59791 * mm_release()->clear_child_tid() from writing to a user-controlled
59792@@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59793 */
59794 set_fs(USER_DS);
59795
59796+ profile_task_exit(tsk);
59797+
59798+ WARN_ON(atomic_read(&tsk->fs_excl));
59799+
59800+ if (unlikely(!tsk->pid))
59801+ panic("Attempted to kill the idle task!");
59802+
59803 tracehook_report_exit(&code);
59804
59805 validate_creds_for_do_exit(tsk);
59806@@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59807 tsk->exit_code = code;
59808 taskstats_exit(tsk, group_dead);
59809
59810+ gr_acl_handle_psacct(tsk, code);
59811+ gr_acl_handle_exit();
59812+
59813 exit_mm(tsk);
59814
59815 if (group_dead)
59816@@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59817
59818 if (unlikely(wo->wo_flags & WNOWAIT)) {
59819 int exit_code = p->exit_code;
59820- int why, status;
59821+ int why;
59822
59823 get_task_struct(p);
59824 read_unlock(&tasklist_lock);
59825diff -urNp linux-2.6.32.42/kernel/fork.c linux-2.6.32.42/kernel/fork.c
59826--- linux-2.6.32.42/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59827+++ linux-2.6.32.42/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59828@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59829 *stackend = STACK_END_MAGIC; /* for overflow detection */
59830
59831 #ifdef CONFIG_CC_STACKPROTECTOR
59832- tsk->stack_canary = get_random_int();
59833+ tsk->stack_canary = pax_get_random_long();
59834 #endif
59835
59836 /* One for us, one for whoever does the "release_task()" (usually parent) */
59837@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59838 mm->locked_vm = 0;
59839 mm->mmap = NULL;
59840 mm->mmap_cache = NULL;
59841- mm->free_area_cache = oldmm->mmap_base;
59842- mm->cached_hole_size = ~0UL;
59843+ mm->free_area_cache = oldmm->free_area_cache;
59844+ mm->cached_hole_size = oldmm->cached_hole_size;
59845 mm->map_count = 0;
59846 cpumask_clear(mm_cpumask(mm));
59847 mm->mm_rb = RB_ROOT;
59848@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59849 tmp->vm_flags &= ~VM_LOCKED;
59850 tmp->vm_mm = mm;
59851 tmp->vm_next = tmp->vm_prev = NULL;
59852+ tmp->vm_mirror = NULL;
59853 anon_vma_link(tmp);
59854 file = tmp->vm_file;
59855 if (file) {
59856@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59857 if (retval)
59858 goto out;
59859 }
59860+
59861+#ifdef CONFIG_PAX_SEGMEXEC
59862+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59863+ struct vm_area_struct *mpnt_m;
59864+
59865+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59866+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59867+
59868+ if (!mpnt->vm_mirror)
59869+ continue;
59870+
59871+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59872+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59873+ mpnt->vm_mirror = mpnt_m;
59874+ } else {
59875+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59876+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59877+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59878+ mpnt->vm_mirror->vm_mirror = mpnt;
59879+ }
59880+ }
59881+ BUG_ON(mpnt_m);
59882+ }
59883+#endif
59884+
59885 /* a new mm has just been created */
59886 arch_dup_mmap(oldmm, mm);
59887 retval = 0;
59888@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59889 write_unlock(&fs->lock);
59890 return -EAGAIN;
59891 }
59892- fs->users++;
59893+ atomic_inc(&fs->users);
59894 write_unlock(&fs->lock);
59895 return 0;
59896 }
59897 tsk->fs = copy_fs_struct(fs);
59898 if (!tsk->fs)
59899 return -ENOMEM;
59900+ gr_set_chroot_entries(tsk, &tsk->fs->root);
59901 return 0;
59902 }
59903
59904@@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59905 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59906 #endif
59907 retval = -EAGAIN;
59908+
59909+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59910+
59911 if (atomic_read(&p->real_cred->user->processes) >=
59912 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59913- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59914- p->real_cred->user != INIT_USER)
59915+ if (p->real_cred->user != INIT_USER &&
59916+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59917 goto bad_fork_free;
59918 }
59919
59920@@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59921 goto bad_fork_free_pid;
59922 }
59923
59924+ gr_copy_label(p);
59925+
59926 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59927 /*
59928 * Clear TID on mm_release()?
59929@@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59930 bad_fork_free:
59931 free_task(p);
59932 fork_out:
59933+ gr_log_forkfail(retval);
59934+
59935 return ERR_PTR(retval);
59936 }
59937
59938@@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59939 if (clone_flags & CLONE_PARENT_SETTID)
59940 put_user(nr, parent_tidptr);
59941
59942+ gr_handle_brute_check();
59943+
59944 if (clone_flags & CLONE_VFORK) {
59945 p->vfork_done = &vfork;
59946 init_completion(&vfork);
59947@@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59948 return 0;
59949
59950 /* don't need lock here; in the worst case we'll do useless copy */
59951- if (fs->users == 1)
59952+ if (atomic_read(&fs->users) == 1)
59953 return 0;
59954
59955 *new_fsp = copy_fs_struct(fs);
59956@@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59957 fs = current->fs;
59958 write_lock(&fs->lock);
59959 current->fs = new_fs;
59960- if (--fs->users)
59961+ gr_set_chroot_entries(current, &current->fs->root);
59962+ if (atomic_dec_return(&fs->users))
59963 new_fs = NULL;
59964 else
59965 new_fs = fs;
59966diff -urNp linux-2.6.32.42/kernel/futex.c linux-2.6.32.42/kernel/futex.c
59967--- linux-2.6.32.42/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59968+++ linux-2.6.32.42/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59969@@ -54,6 +54,7 @@
59970 #include <linux/mount.h>
59971 #include <linux/pagemap.h>
59972 #include <linux/syscalls.h>
59973+#include <linux/ptrace.h>
59974 #include <linux/signal.h>
59975 #include <linux/module.h>
59976 #include <linux/magic.h>
59977@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59978 struct page *page;
59979 int err;
59980
59981+#ifdef CONFIG_PAX_SEGMEXEC
59982+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59983+ return -EFAULT;
59984+#endif
59985+
59986 /*
59987 * The futex address must be "naturally" aligned.
59988 */
59989@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59990 struct futex_q q;
59991 int ret;
59992
59993+ pax_track_stack();
59994+
59995 if (!bitset)
59996 return -EINVAL;
59997
59998@@ -1841,7 +1849,7 @@ retry:
59999
60000 restart = &current_thread_info()->restart_block;
60001 restart->fn = futex_wait_restart;
60002- restart->futex.uaddr = (u32 *)uaddr;
60003+ restart->futex.uaddr = uaddr;
60004 restart->futex.val = val;
60005 restart->futex.time = abs_time->tv64;
60006 restart->futex.bitset = bitset;
60007@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
60008 struct futex_q q;
60009 int res, ret;
60010
60011+ pax_track_stack();
60012+
60013 if (!bitset)
60014 return -EINVAL;
60015
60016@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60017 {
60018 struct robust_list_head __user *head;
60019 unsigned long ret;
60020+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60021 const struct cred *cred = current_cred(), *pcred;
60022+#endif
60023
60024 if (!futex_cmpxchg_enabled)
60025 return -ENOSYS;
60026@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60027 if (!p)
60028 goto err_unlock;
60029 ret = -EPERM;
60030+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60031+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60032+ goto err_unlock;
60033+#else
60034 pcred = __task_cred(p);
60035 if (cred->euid != pcred->euid &&
60036 cred->euid != pcred->uid &&
60037 !capable(CAP_SYS_PTRACE))
60038 goto err_unlock;
60039+#endif
60040 head = p->robust_list;
60041 rcu_read_unlock();
60042 }
60043@@ -2459,7 +2476,7 @@ retry:
60044 */
60045 static inline int fetch_robust_entry(struct robust_list __user **entry,
60046 struct robust_list __user * __user *head,
60047- int *pi)
60048+ unsigned int *pi)
60049 {
60050 unsigned long uentry;
60051
60052@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
60053 {
60054 u32 curval;
60055 int i;
60056+ mm_segment_t oldfs;
60057
60058 /*
60059 * This will fail and we want it. Some arch implementations do
60060@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
60061 * implementation, the non functional ones will return
60062 * -ENOSYS.
60063 */
60064+ oldfs = get_fs();
60065+ set_fs(USER_DS);
60066 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
60067+ set_fs(oldfs);
60068 if (curval == -EFAULT)
60069 futex_cmpxchg_enabled = 1;
60070
60071diff -urNp linux-2.6.32.42/kernel/futex_compat.c linux-2.6.32.42/kernel/futex_compat.c
60072--- linux-2.6.32.42/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
60073+++ linux-2.6.32.42/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
60074@@ -10,6 +10,7 @@
60075 #include <linux/compat.h>
60076 #include <linux/nsproxy.h>
60077 #include <linux/futex.h>
60078+#include <linux/ptrace.h>
60079
60080 #include <asm/uaccess.h>
60081
60082@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
60083 {
60084 struct compat_robust_list_head __user *head;
60085 unsigned long ret;
60086- const struct cred *cred = current_cred(), *pcred;
60087+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60088+ const struct cred *cred = current_cred();
60089+ const struct cred *pcred;
60090+#endif
60091
60092 if (!futex_cmpxchg_enabled)
60093 return -ENOSYS;
60094@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
60095 if (!p)
60096 goto err_unlock;
60097 ret = -EPERM;
60098+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60099+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60100+ goto err_unlock;
60101+#else
60102 pcred = __task_cred(p);
60103 if (cred->euid != pcred->euid &&
60104 cred->euid != pcred->uid &&
60105 !capable(CAP_SYS_PTRACE))
60106 goto err_unlock;
60107+#endif
60108 head = p->compat_robust_list;
60109 read_unlock(&tasklist_lock);
60110 }
60111diff -urNp linux-2.6.32.42/kernel/gcov/base.c linux-2.6.32.42/kernel/gcov/base.c
60112--- linux-2.6.32.42/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
60113+++ linux-2.6.32.42/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
60114@@ -102,11 +102,6 @@ void gcov_enable_events(void)
60115 }
60116
60117 #ifdef CONFIG_MODULES
60118-static inline int within(void *addr, void *start, unsigned long size)
60119-{
60120- return ((addr >= start) && (addr < start + size));
60121-}
60122-
60123 /* Update list and generate events when modules are unloaded. */
60124 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
60125 void *data)
60126@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
60127 prev = NULL;
60128 /* Remove entries located in module from linked list. */
60129 for (info = gcov_info_head; info; info = info->next) {
60130- if (within(info, mod->module_core, mod->core_size)) {
60131+ if (within_module_core_rw((unsigned long)info, mod)) {
60132 if (prev)
60133 prev->next = info->next;
60134 else
60135diff -urNp linux-2.6.32.42/kernel/hrtimer.c linux-2.6.32.42/kernel/hrtimer.c
60136--- linux-2.6.32.42/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
60137+++ linux-2.6.32.42/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
60138@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
60139 local_irq_restore(flags);
60140 }
60141
60142-static void run_hrtimer_softirq(struct softirq_action *h)
60143+static void run_hrtimer_softirq(void)
60144 {
60145 hrtimer_peek_ahead_timers();
60146 }
60147diff -urNp linux-2.6.32.42/kernel/kallsyms.c linux-2.6.32.42/kernel/kallsyms.c
60148--- linux-2.6.32.42/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
60149+++ linux-2.6.32.42/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
60150@@ -11,6 +11,9 @@
60151 * Changed the compression method from stem compression to "table lookup"
60152 * compression (see scripts/kallsyms.c for a more complete description)
60153 */
60154+#ifdef CONFIG_GRKERNSEC_HIDESYM
60155+#define __INCLUDED_BY_HIDESYM 1
60156+#endif
60157 #include <linux/kallsyms.h>
60158 #include <linux/module.h>
60159 #include <linux/init.h>
60160@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
60161
60162 static inline int is_kernel_inittext(unsigned long addr)
60163 {
60164+ if (system_state != SYSTEM_BOOTING)
60165+ return 0;
60166+
60167 if (addr >= (unsigned long)_sinittext
60168 && addr <= (unsigned long)_einittext)
60169 return 1;
60170 return 0;
60171 }
60172
60173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60174+#ifdef CONFIG_MODULES
60175+static inline int is_module_text(unsigned long addr)
60176+{
60177+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60178+ return 1;
60179+
60180+ addr = ktla_ktva(addr);
60181+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60182+}
60183+#else
60184+static inline int is_module_text(unsigned long addr)
60185+{
60186+ return 0;
60187+}
60188+#endif
60189+#endif
60190+
60191 static inline int is_kernel_text(unsigned long addr)
60192 {
60193 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60194@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
60195
60196 static inline int is_kernel(unsigned long addr)
60197 {
60198+
60199+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60200+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
60201+ return 1;
60202+
60203+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60204+#else
60205 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60206+#endif
60207+
60208 return 1;
60209 return in_gate_area_no_task(addr);
60210 }
60211
60212 static int is_ksym_addr(unsigned long addr)
60213 {
60214+
60215+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60216+ if (is_module_text(addr))
60217+ return 0;
60218+#endif
60219+
60220 if (all_var)
60221 return is_kernel(addr);
60222
60223@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
60224
60225 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60226 {
60227- iter->name[0] = '\0';
60228 iter->nameoff = get_symbol_offset(new_pos);
60229 iter->pos = new_pos;
60230 }
60231@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
60232 {
60233 struct kallsym_iter *iter = m->private;
60234
60235+#ifdef CONFIG_GRKERNSEC_HIDESYM
60236+ if (current_uid())
60237+ return 0;
60238+#endif
60239+
60240 /* Some debugging symbols have no name. Ignore them. */
60241 if (!iter->name[0])
60242 return 0;
60243@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
60244 struct kallsym_iter *iter;
60245 int ret;
60246
60247- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60248+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60249 if (!iter)
60250 return -ENOMEM;
60251 reset_iter(iter, 0);
60252diff -urNp linux-2.6.32.42/kernel/kgdb.c linux-2.6.32.42/kernel/kgdb.c
60253--- linux-2.6.32.42/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
60254+++ linux-2.6.32.42/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
60255@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
60256 /* Guard for recursive entry */
60257 static int exception_level;
60258
60259-static struct kgdb_io *kgdb_io_ops;
60260+static const struct kgdb_io *kgdb_io_ops;
60261 static DEFINE_SPINLOCK(kgdb_registration_lock);
60262
60263 /* kgdb console driver is loaded */
60264@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
60265 */
60266 static atomic_t passive_cpu_wait[NR_CPUS];
60267 static atomic_t cpu_in_kgdb[NR_CPUS];
60268-atomic_t kgdb_setting_breakpoint;
60269+atomic_unchecked_t kgdb_setting_breakpoint;
60270
60271 struct task_struct *kgdb_usethread;
60272 struct task_struct *kgdb_contthread;
60273@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
60274 sizeof(unsigned long)];
60275
60276 /* to keep track of the CPU which is doing the single stepping*/
60277-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60278+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60279
60280 /*
60281 * If you are debugging a problem where roundup (the collection of
60282@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
60283 return 0;
60284 if (kgdb_connected)
60285 return 1;
60286- if (atomic_read(&kgdb_setting_breakpoint))
60287+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
60288 return 1;
60289 if (print_wait)
60290 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
60291@@ -1426,8 +1426,8 @@ acquirelock:
60292 * instance of the exception handler wanted to come into the
60293 * debugger on a different CPU via a single step
60294 */
60295- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60296- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
60297+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60298+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
60299
60300 atomic_set(&kgdb_active, -1);
60301 touch_softlockup_watchdog();
60302@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
60303 *
60304 * Register it with the KGDB core.
60305 */
60306-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
60307+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
60308 {
60309 int err;
60310
60311@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
60312 *
60313 * Unregister it with the KGDB core.
60314 */
60315-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
60316+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
60317 {
60318 BUG_ON(kgdb_connected);
60319
60320@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
60321 */
60322 void kgdb_breakpoint(void)
60323 {
60324- atomic_set(&kgdb_setting_breakpoint, 1);
60325+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
60326 wmb(); /* Sync point before breakpoint */
60327 arch_kgdb_breakpoint();
60328 wmb(); /* Sync point after breakpoint */
60329- atomic_set(&kgdb_setting_breakpoint, 0);
60330+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
60331 }
60332 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
60333
60334diff -urNp linux-2.6.32.42/kernel/kmod.c linux-2.6.32.42/kernel/kmod.c
60335--- linux-2.6.32.42/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
60336+++ linux-2.6.32.42/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
60337@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60338 * If module auto-loading support is disabled then this function
60339 * becomes a no-operation.
60340 */
60341-int __request_module(bool wait, const char *fmt, ...)
60342+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60343 {
60344- va_list args;
60345 char module_name[MODULE_NAME_LEN];
60346 unsigned int max_modprobes;
60347 int ret;
60348- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60349+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60350 static char *envp[] = { "HOME=/",
60351 "TERM=linux",
60352 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60353@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
60354 if (ret)
60355 return ret;
60356
60357- va_start(args, fmt);
60358- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60359- va_end(args);
60360+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60361 if (ret >= MODULE_NAME_LEN)
60362 return -ENAMETOOLONG;
60363
60364+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60365+ if (!current_uid()) {
60366+ /* hack to workaround consolekit/udisks stupidity */
60367+ read_lock(&tasklist_lock);
60368+ if (!strcmp(current->comm, "mount") &&
60369+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60370+ read_unlock(&tasklist_lock);
60371+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60372+ return -EPERM;
60373+ }
60374+ read_unlock(&tasklist_lock);
60375+ }
60376+#endif
60377+
60378 /* If modprobe needs a service that is in a module, we get a recursive
60379 * loop. Limit the number of running kmod threads to max_threads/2 or
60380 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60381@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
60382 atomic_dec(&kmod_concurrent);
60383 return ret;
60384 }
60385+
60386+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60387+{
60388+ va_list args;
60389+ int ret;
60390+
60391+ va_start(args, fmt);
60392+ ret = ____request_module(wait, module_param, fmt, args);
60393+ va_end(args);
60394+
60395+ return ret;
60396+}
60397+
60398+int __request_module(bool wait, const char *fmt, ...)
60399+{
60400+ va_list args;
60401+ int ret;
60402+
60403+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60404+ if (current_uid()) {
60405+ char module_param[MODULE_NAME_LEN];
60406+
60407+ memset(module_param, 0, sizeof(module_param));
60408+
60409+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60410+
60411+ va_start(args, fmt);
60412+ ret = ____request_module(wait, module_param, fmt, args);
60413+ va_end(args);
60414+
60415+ return ret;
60416+ }
60417+#endif
60418+
60419+ va_start(args, fmt);
60420+ ret = ____request_module(wait, NULL, fmt, args);
60421+ va_end(args);
60422+
60423+ return ret;
60424+}
60425+
60426+
60427 EXPORT_SYMBOL(__request_module);
60428 #endif /* CONFIG_MODULES */
60429
60430diff -urNp linux-2.6.32.42/kernel/kprobes.c linux-2.6.32.42/kernel/kprobes.c
60431--- linux-2.6.32.42/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
60432+++ linux-2.6.32.42/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
60433@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
60434 * kernel image and loaded module images reside. This is required
60435 * so x86_64 can correctly handle the %rip-relative fixups.
60436 */
60437- kip->insns = module_alloc(PAGE_SIZE);
60438+ kip->insns = module_alloc_exec(PAGE_SIZE);
60439 if (!kip->insns) {
60440 kfree(kip);
60441 return NULL;
60442@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
60443 */
60444 if (!list_is_singular(&kprobe_insn_pages)) {
60445 list_del(&kip->list);
60446- module_free(NULL, kip->insns);
60447+ module_free_exec(NULL, kip->insns);
60448 kfree(kip);
60449 }
60450 return 1;
60451@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
60452 {
60453 int i, err = 0;
60454 unsigned long offset = 0, size = 0;
60455- char *modname, namebuf[128];
60456+ char *modname, namebuf[KSYM_NAME_LEN];
60457 const char *symbol_name;
60458 void *addr;
60459 struct kprobe_blackpoint *kb;
60460@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
60461 const char *sym = NULL;
60462 unsigned int i = *(loff_t *) v;
60463 unsigned long offset = 0;
60464- char *modname, namebuf[128];
60465+ char *modname, namebuf[KSYM_NAME_LEN];
60466
60467 head = &kprobe_table[i];
60468 preempt_disable();
60469diff -urNp linux-2.6.32.42/kernel/lockdep.c linux-2.6.32.42/kernel/lockdep.c
60470--- linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
60471+++ linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
60472@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
60473 /*
60474 * Various lockdep statistics:
60475 */
60476-atomic_t chain_lookup_hits;
60477-atomic_t chain_lookup_misses;
60478-atomic_t hardirqs_on_events;
60479-atomic_t hardirqs_off_events;
60480-atomic_t redundant_hardirqs_on;
60481-atomic_t redundant_hardirqs_off;
60482-atomic_t softirqs_on_events;
60483-atomic_t softirqs_off_events;
60484-atomic_t redundant_softirqs_on;
60485-atomic_t redundant_softirqs_off;
60486-atomic_t nr_unused_locks;
60487-atomic_t nr_cyclic_checks;
60488-atomic_t nr_find_usage_forwards_checks;
60489-atomic_t nr_find_usage_backwards_checks;
60490+atomic_unchecked_t chain_lookup_hits;
60491+atomic_unchecked_t chain_lookup_misses;
60492+atomic_unchecked_t hardirqs_on_events;
60493+atomic_unchecked_t hardirqs_off_events;
60494+atomic_unchecked_t redundant_hardirqs_on;
60495+atomic_unchecked_t redundant_hardirqs_off;
60496+atomic_unchecked_t softirqs_on_events;
60497+atomic_unchecked_t softirqs_off_events;
60498+atomic_unchecked_t redundant_softirqs_on;
60499+atomic_unchecked_t redundant_softirqs_off;
60500+atomic_unchecked_t nr_unused_locks;
60501+atomic_unchecked_t nr_cyclic_checks;
60502+atomic_unchecked_t nr_find_usage_forwards_checks;
60503+atomic_unchecked_t nr_find_usage_backwards_checks;
60504 #endif
60505
60506 /*
60507@@ -577,6 +577,10 @@ static int static_obj(void *obj)
60508 int i;
60509 #endif
60510
60511+#ifdef CONFIG_PAX_KERNEXEC
60512+ start = ktla_ktva(start);
60513+#endif
60514+
60515 /*
60516 * static variable?
60517 */
60518@@ -592,8 +596,7 @@ static int static_obj(void *obj)
60519 */
60520 for_each_possible_cpu(i) {
60521 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
60522- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
60523- + per_cpu_offset(i);
60524+ end = start + PERCPU_ENOUGH_ROOM;
60525
60526 if ((addr >= start) && (addr < end))
60527 return 1;
60528@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
60529 if (!static_obj(lock->key)) {
60530 debug_locks_off();
60531 printk("INFO: trying to register non-static key.\n");
60532+ printk("lock:%pS key:%pS.\n", lock, lock->key);
60533 printk("the code is fine but needs lockdep annotation.\n");
60534 printk("turning off the locking correctness validator.\n");
60535 dump_stack();
60536@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
60537 if (!class)
60538 return 0;
60539 }
60540- debug_atomic_inc((atomic_t *)&class->ops);
60541+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
60542 if (very_verbose(class)) {
60543 printk("\nacquire class [%p] %s", class->key, class->name);
60544 if (class->name_version > 1)
60545diff -urNp linux-2.6.32.42/kernel/lockdep_internals.h linux-2.6.32.42/kernel/lockdep_internals.h
60546--- linux-2.6.32.42/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
60547+++ linux-2.6.32.42/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
60548@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
60549 /*
60550 * Various lockdep statistics:
60551 */
60552-extern atomic_t chain_lookup_hits;
60553-extern atomic_t chain_lookup_misses;
60554-extern atomic_t hardirqs_on_events;
60555-extern atomic_t hardirqs_off_events;
60556-extern atomic_t redundant_hardirqs_on;
60557-extern atomic_t redundant_hardirqs_off;
60558-extern atomic_t softirqs_on_events;
60559-extern atomic_t softirqs_off_events;
60560-extern atomic_t redundant_softirqs_on;
60561-extern atomic_t redundant_softirqs_off;
60562-extern atomic_t nr_unused_locks;
60563-extern atomic_t nr_cyclic_checks;
60564-extern atomic_t nr_cyclic_check_recursions;
60565-extern atomic_t nr_find_usage_forwards_checks;
60566-extern atomic_t nr_find_usage_forwards_recursions;
60567-extern atomic_t nr_find_usage_backwards_checks;
60568-extern atomic_t nr_find_usage_backwards_recursions;
60569-# define debug_atomic_inc(ptr) atomic_inc(ptr)
60570-# define debug_atomic_dec(ptr) atomic_dec(ptr)
60571-# define debug_atomic_read(ptr) atomic_read(ptr)
60572+extern atomic_unchecked_t chain_lookup_hits;
60573+extern atomic_unchecked_t chain_lookup_misses;
60574+extern atomic_unchecked_t hardirqs_on_events;
60575+extern atomic_unchecked_t hardirqs_off_events;
60576+extern atomic_unchecked_t redundant_hardirqs_on;
60577+extern atomic_unchecked_t redundant_hardirqs_off;
60578+extern atomic_unchecked_t softirqs_on_events;
60579+extern atomic_unchecked_t softirqs_off_events;
60580+extern atomic_unchecked_t redundant_softirqs_on;
60581+extern atomic_unchecked_t redundant_softirqs_off;
60582+extern atomic_unchecked_t nr_unused_locks;
60583+extern atomic_unchecked_t nr_cyclic_checks;
60584+extern atomic_unchecked_t nr_cyclic_check_recursions;
60585+extern atomic_unchecked_t nr_find_usage_forwards_checks;
60586+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
60587+extern atomic_unchecked_t nr_find_usage_backwards_checks;
60588+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
60589+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
60590+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
60591+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
60592 #else
60593 # define debug_atomic_inc(ptr) do { } while (0)
60594 # define debug_atomic_dec(ptr) do { } while (0)
60595diff -urNp linux-2.6.32.42/kernel/lockdep_proc.c linux-2.6.32.42/kernel/lockdep_proc.c
60596--- linux-2.6.32.42/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
60597+++ linux-2.6.32.42/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
60598@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60599
60600 static void print_name(struct seq_file *m, struct lock_class *class)
60601 {
60602- char str[128];
60603+ char str[KSYM_NAME_LEN];
60604 const char *name = class->name;
60605
60606 if (!name) {
60607diff -urNp linux-2.6.32.42/kernel/module.c linux-2.6.32.42/kernel/module.c
60608--- linux-2.6.32.42/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
60609+++ linux-2.6.32.42/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
60610@@ -55,6 +55,7 @@
60611 #include <linux/async.h>
60612 #include <linux/percpu.h>
60613 #include <linux/kmemleak.h>
60614+#include <linux/grsecurity.h>
60615
60616 #define CREATE_TRACE_POINTS
60617 #include <trace/events/module.h>
60618@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
60619 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
60620
60621 /* Bounds of module allocation, for speeding __module_address */
60622-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60623+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60624+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60625
60626 int register_module_notifier(struct notifier_block * nb)
60627 {
60628@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
60629 return true;
60630
60631 list_for_each_entry_rcu(mod, &modules, list) {
60632- struct symsearch arr[] = {
60633+ struct symsearch modarr[] = {
60634 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60635 NOT_GPL_ONLY, false },
60636 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60637@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
60638 #endif
60639 };
60640
60641- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60642+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60643 return true;
60644 }
60645 return false;
60646@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
60647 void *ptr;
60648 int cpu;
60649
60650- if (align > PAGE_SIZE) {
60651+ if (align-1 >= PAGE_SIZE) {
60652 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60653 name, align, PAGE_SIZE);
60654 align = PAGE_SIZE;
60655@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
60656 * /sys/module/foo/sections stuff
60657 * J. Corbet <corbet@lwn.net>
60658 */
60659-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
60660+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60661
60662 static inline bool sect_empty(const Elf_Shdr *sect)
60663 {
60664@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
60665 destroy_params(mod->kp, mod->num_kp);
60666
60667 /* This may be NULL, but that's OK */
60668- module_free(mod, mod->module_init);
60669+ module_free(mod, mod->module_init_rw);
60670+ module_free_exec(mod, mod->module_init_rx);
60671 kfree(mod->args);
60672 if (mod->percpu)
60673 percpu_modfree(mod->percpu);
60674@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
60675 percpu_modfree(mod->refptr);
60676 #endif
60677 /* Free lock-classes: */
60678- lockdep_free_key_range(mod->module_core, mod->core_size);
60679+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60680+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60681
60682 /* Finally, free the core (containing the module structure) */
60683- module_free(mod, mod->module_core);
60684+ module_free_exec(mod, mod->module_core_rx);
60685+ module_free(mod, mod->module_core_rw);
60686
60687 #ifdef CONFIG_MPU
60688 update_protections(current->mm);
60689@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
60690 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60691 int ret = 0;
60692 const struct kernel_symbol *ksym;
60693+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60694+ int is_fs_load = 0;
60695+ int register_filesystem_found = 0;
60696+ char *p;
60697+
60698+ p = strstr(mod->args, "grsec_modharden_fs");
60699+
60700+ if (p) {
60701+ char *endptr = p + strlen("grsec_modharden_fs");
60702+ /* copy \0 as well */
60703+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60704+ is_fs_load = 1;
60705+ }
60706+#endif
60707+
60708
60709 for (i = 1; i < n; i++) {
60710+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60711+ const char *name = strtab + sym[i].st_name;
60712+
60713+ /* it's a real shame this will never get ripped and copied
60714+ upstream! ;(
60715+ */
60716+ if (is_fs_load && !strcmp(name, "register_filesystem"))
60717+ register_filesystem_found = 1;
60718+#endif
60719 switch (sym[i].st_shndx) {
60720 case SHN_COMMON:
60721 /* We compiled with -fno-common. These are not
60722@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60723 strtab + sym[i].st_name, mod);
60724 /* Ok if resolved. */
60725 if (ksym) {
60726+ pax_open_kernel();
60727 sym[i].st_value = ksym->value;
60728+ pax_close_kernel();
60729 break;
60730 }
60731
60732@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60733 secbase = (unsigned long)mod->percpu;
60734 else
60735 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60736+ pax_open_kernel();
60737 sym[i].st_value += secbase;
60738+ pax_close_kernel();
60739 break;
60740 }
60741 }
60742
60743+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60744+ if (is_fs_load && !register_filesystem_found) {
60745+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60746+ ret = -EPERM;
60747+ }
60748+#endif
60749+
60750 return ret;
60751 }
60752
60753@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60754 || s->sh_entsize != ~0UL
60755 || strstarts(secstrings + s->sh_name, ".init"))
60756 continue;
60757- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60758+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60759+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60760+ else
60761+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60762 DEBUGP("\t%s\n", secstrings + s->sh_name);
60763 }
60764- if (m == 0)
60765- mod->core_text_size = mod->core_size;
60766 }
60767
60768 DEBUGP("Init section allocation order:\n");
60769@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60770 || s->sh_entsize != ~0UL
60771 || !strstarts(secstrings + s->sh_name, ".init"))
60772 continue;
60773- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60774- | INIT_OFFSET_MASK);
60775+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60776+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60777+ else
60778+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60779+ s->sh_entsize |= INIT_OFFSET_MASK;
60780 DEBUGP("\t%s\n", secstrings + s->sh_name);
60781 }
60782- if (m == 0)
60783- mod->init_text_size = mod->init_size;
60784 }
60785 }
60786
60787@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60788
60789 /* As per nm */
60790 static char elf_type(const Elf_Sym *sym,
60791- Elf_Shdr *sechdrs,
60792- const char *secstrings,
60793- struct module *mod)
60794+ const Elf_Shdr *sechdrs,
60795+ const char *secstrings)
60796 {
60797 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60798 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60799@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60800
60801 /* Put symbol section at end of init part of module. */
60802 symsect->sh_flags |= SHF_ALLOC;
60803- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60804+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60805 symindex) | INIT_OFFSET_MASK;
60806 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60807
60808@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60809 }
60810
60811 /* Append room for core symbols at end of core part. */
60812- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60813- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60814+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60815+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60816
60817 /* Put string table section at end of init part of module. */
60818 strsect->sh_flags |= SHF_ALLOC;
60819- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60820+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60821 strindex) | INIT_OFFSET_MASK;
60822 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60823
60824 /* Append room for core symbols' strings at end of core part. */
60825- *pstroffs = mod->core_size;
60826+ *pstroffs = mod->core_size_rx;
60827 __set_bit(0, strmap);
60828- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60829+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60830
60831 return symoffs;
60832 }
60833@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60834 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60835 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60836
60837+ pax_open_kernel();
60838+
60839 /* Set types up while we still have access to sections. */
60840 for (i = 0; i < mod->num_symtab; i++)
60841 mod->symtab[i].st_info
60842- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60843+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
60844
60845- mod->core_symtab = dst = mod->module_core + symoffs;
60846+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
60847 src = mod->symtab;
60848 *dst = *src;
60849 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60850@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60851 }
60852 mod->core_num_syms = ndst;
60853
60854- mod->core_strtab = s = mod->module_core + stroffs;
60855+ mod->core_strtab = s = mod->module_core_rx + stroffs;
60856 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60857 if (test_bit(i, strmap))
60858 *++s = mod->strtab[i];
60859+
60860+ pax_close_kernel();
60861 }
60862 #else
60863 static inline unsigned long layout_symtab(struct module *mod,
60864@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60865 #endif
60866 }
60867
60868-static void *module_alloc_update_bounds(unsigned long size)
60869+static void *module_alloc_update_bounds_rw(unsigned long size)
60870 {
60871 void *ret = module_alloc(size);
60872
60873 if (ret) {
60874 /* Update module bounds. */
60875- if ((unsigned long)ret < module_addr_min)
60876- module_addr_min = (unsigned long)ret;
60877- if ((unsigned long)ret + size > module_addr_max)
60878- module_addr_max = (unsigned long)ret + size;
60879+ if ((unsigned long)ret < module_addr_min_rw)
60880+ module_addr_min_rw = (unsigned long)ret;
60881+ if ((unsigned long)ret + size > module_addr_max_rw)
60882+ module_addr_max_rw = (unsigned long)ret + size;
60883+ }
60884+ return ret;
60885+}
60886+
60887+static void *module_alloc_update_bounds_rx(unsigned long size)
60888+{
60889+ void *ret = module_alloc_exec(size);
60890+
60891+ if (ret) {
60892+ /* Update module bounds. */
60893+ if ((unsigned long)ret < module_addr_min_rx)
60894+ module_addr_min_rx = (unsigned long)ret;
60895+ if ((unsigned long)ret + size > module_addr_max_rx)
60896+ module_addr_max_rx = (unsigned long)ret + size;
60897 }
60898 return ret;
60899 }
60900@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60901 unsigned int i;
60902
60903 /* only scan the sections containing data */
60904- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60905- (unsigned long)mod->module_core,
60906+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60907+ (unsigned long)mod->module_core_rw,
60908 sizeof(struct module), GFP_KERNEL);
60909
60910 for (i = 1; i < hdr->e_shnum; i++) {
60911@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60912 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60913 continue;
60914
60915- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60916- (unsigned long)mod->module_core,
60917+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60918+ (unsigned long)mod->module_core_rw,
60919 sechdrs[i].sh_size, GFP_KERNEL);
60920 }
60921 }
60922@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60923 secstrings, &stroffs, strmap);
60924
60925 /* Do the allocs. */
60926- ptr = module_alloc_update_bounds(mod->core_size);
60927+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60928 /*
60929 * The pointer to this block is stored in the module structure
60930 * which is inside the block. Just mark it as not being a
60931@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60932 err = -ENOMEM;
60933 goto free_percpu;
60934 }
60935- memset(ptr, 0, mod->core_size);
60936- mod->module_core = ptr;
60937+ memset(ptr, 0, mod->core_size_rw);
60938+ mod->module_core_rw = ptr;
60939
60940- ptr = module_alloc_update_bounds(mod->init_size);
60941+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60942 /*
60943 * The pointer to this block is stored in the module structure
60944 * which is inside the block. This block doesn't need to be
60945 * scanned as it contains data and code that will be freed
60946 * after the module is initialized.
60947 */
60948- kmemleak_ignore(ptr);
60949- if (!ptr && mod->init_size) {
60950+ kmemleak_not_leak(ptr);
60951+ if (!ptr && mod->init_size_rw) {
60952+ err = -ENOMEM;
60953+ goto free_core_rw;
60954+ }
60955+ memset(ptr, 0, mod->init_size_rw);
60956+ mod->module_init_rw = ptr;
60957+
60958+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60959+ kmemleak_not_leak(ptr);
60960+ if (!ptr) {
60961 err = -ENOMEM;
60962- goto free_core;
60963+ goto free_init_rw;
60964 }
60965- memset(ptr, 0, mod->init_size);
60966- mod->module_init = ptr;
60967+
60968+ pax_open_kernel();
60969+ memset(ptr, 0, mod->core_size_rx);
60970+ pax_close_kernel();
60971+ mod->module_core_rx = ptr;
60972+
60973+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60974+ kmemleak_not_leak(ptr);
60975+ if (!ptr && mod->init_size_rx) {
60976+ err = -ENOMEM;
60977+ goto free_core_rx;
60978+ }
60979+
60980+ pax_open_kernel();
60981+ memset(ptr, 0, mod->init_size_rx);
60982+ pax_close_kernel();
60983+ mod->module_init_rx = ptr;
60984
60985 /* Transfer each section which specifies SHF_ALLOC */
60986 DEBUGP("final section addresses:\n");
60987@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60988 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60989 continue;
60990
60991- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60992- dest = mod->module_init
60993- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60994- else
60995- dest = mod->module_core + sechdrs[i].sh_entsize;
60996+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60997+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60998+ dest = mod->module_init_rw
60999+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61000+ else
61001+ dest = mod->module_init_rx
61002+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61003+ } else {
61004+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
61005+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
61006+ else
61007+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
61008+ }
61009+
61010+ if (sechdrs[i].sh_type != SHT_NOBITS) {
61011
61012- if (sechdrs[i].sh_type != SHT_NOBITS)
61013- memcpy(dest, (void *)sechdrs[i].sh_addr,
61014- sechdrs[i].sh_size);
61015+#ifdef CONFIG_PAX_KERNEXEC
61016+#ifdef CONFIG_X86_64
61017+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
61018+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
61019+#endif
61020+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
61021+ pax_open_kernel();
61022+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61023+ pax_close_kernel();
61024+ } else
61025+#endif
61026+
61027+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61028+ }
61029 /* Update sh_addr to point to copy in image. */
61030- sechdrs[i].sh_addr = (unsigned long)dest;
61031+
61032+#ifdef CONFIG_PAX_KERNEXEC
61033+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
61034+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
61035+ else
61036+#endif
61037+
61038+ sechdrs[i].sh_addr = (unsigned long)dest;
61039 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
61040 }
61041 /* Module has been moved. */
61042@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
61043 mod->name);
61044 if (!mod->refptr) {
61045 err = -ENOMEM;
61046- goto free_init;
61047+ goto free_init_rx;
61048 }
61049 #endif
61050 /* Now we've moved module, initialize linked lists, etc. */
61051@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
61052 /* Set up MODINFO_ATTR fields */
61053 setup_modinfo(mod, sechdrs, infoindex);
61054
61055+ mod->args = args;
61056+
61057+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61058+ {
61059+ char *p, *p2;
61060+
61061+ if (strstr(mod->args, "grsec_modharden_netdev")) {
61062+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
61063+ err = -EPERM;
61064+ goto cleanup;
61065+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
61066+ p += strlen("grsec_modharden_normal");
61067+ p2 = strstr(p, "_");
61068+ if (p2) {
61069+ *p2 = '\0';
61070+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
61071+ *p2 = '_';
61072+ }
61073+ err = -EPERM;
61074+ goto cleanup;
61075+ }
61076+ }
61077+#endif
61078+
61079+
61080 /* Fix up syms, so that st_value is a pointer to location. */
61081 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
61082 mod);
61083@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
61084
61085 /* Now do relocations. */
61086 for (i = 1; i < hdr->e_shnum; i++) {
61087- const char *strtab = (char *)sechdrs[strindex].sh_addr;
61088 unsigned int info = sechdrs[i].sh_info;
61089+ strtab = (char *)sechdrs[strindex].sh_addr;
61090
61091 /* Not a valid relocation section? */
61092 if (info >= hdr->e_shnum)
61093@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
61094 * Do it before processing of module parameters, so the module
61095 * can provide parameter accessor functions of its own.
61096 */
61097- if (mod->module_init)
61098- flush_icache_range((unsigned long)mod->module_init,
61099- (unsigned long)mod->module_init
61100- + mod->init_size);
61101- flush_icache_range((unsigned long)mod->module_core,
61102- (unsigned long)mod->module_core + mod->core_size);
61103+ if (mod->module_init_rx)
61104+ flush_icache_range((unsigned long)mod->module_init_rx,
61105+ (unsigned long)mod->module_init_rx
61106+ + mod->init_size_rx);
61107+ flush_icache_range((unsigned long)mod->module_core_rx,
61108+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
61109
61110 set_fs(old_fs);
61111
61112- mod->args = args;
61113 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
61114 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
61115 mod->name);
61116@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
61117 free_unload:
61118 module_unload_free(mod);
61119 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
61120+ free_init_rx:
61121 percpu_modfree(mod->refptr);
61122- free_init:
61123 #endif
61124- module_free(mod, mod->module_init);
61125- free_core:
61126- module_free(mod, mod->module_core);
61127+ module_free_exec(mod, mod->module_init_rx);
61128+ free_core_rx:
61129+ module_free_exec(mod, mod->module_core_rx);
61130+ free_init_rw:
61131+ module_free(mod, mod->module_init_rw);
61132+ free_core_rw:
61133+ module_free(mod, mod->module_core_rw);
61134 /* mod will be freed with core. Don't access it beyond this line! */
61135 free_percpu:
61136 if (percpu)
61137@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
61138 mod->symtab = mod->core_symtab;
61139 mod->strtab = mod->core_strtab;
61140 #endif
61141- module_free(mod, mod->module_init);
61142- mod->module_init = NULL;
61143- mod->init_size = 0;
61144- mod->init_text_size = 0;
61145+ module_free(mod, mod->module_init_rw);
61146+ module_free_exec(mod, mod->module_init_rx);
61147+ mod->module_init_rw = NULL;
61148+ mod->module_init_rx = NULL;
61149+ mod->init_size_rw = 0;
61150+ mod->init_size_rx = 0;
61151 mutex_unlock(&module_mutex);
61152
61153 return 0;
61154@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
61155 unsigned long nextval;
61156
61157 /* At worse, next value is at end of module */
61158- if (within_module_init(addr, mod))
61159- nextval = (unsigned long)mod->module_init+mod->init_text_size;
61160+ if (within_module_init_rx(addr, mod))
61161+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
61162+ else if (within_module_init_rw(addr, mod))
61163+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
61164+ else if (within_module_core_rx(addr, mod))
61165+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
61166+ else if (within_module_core_rw(addr, mod))
61167+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
61168 else
61169- nextval = (unsigned long)mod->module_core+mod->core_text_size;
61170+ return NULL;
61171
61172 /* Scan for closest preceeding symbol, and next symbol. (ELF
61173 starts real symbols at 1). */
61174@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
61175 char buf[8];
61176
61177 seq_printf(m, "%s %u",
61178- mod->name, mod->init_size + mod->core_size);
61179+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61180 print_unload_info(m, mod);
61181
61182 /* Informative for users. */
61183@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
61184 mod->state == MODULE_STATE_COMING ? "Loading":
61185 "Live");
61186 /* Used by oprofile and other similar tools. */
61187- seq_printf(m, " 0x%p", mod->module_core);
61188+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
61189
61190 /* Taints info */
61191 if (mod->taints)
61192@@ -2981,7 +3128,17 @@ static const struct file_operations proc
61193
61194 static int __init proc_modules_init(void)
61195 {
61196+#ifndef CONFIG_GRKERNSEC_HIDESYM
61197+#ifdef CONFIG_GRKERNSEC_PROC_USER
61198+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61199+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61200+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61201+#else
61202 proc_create("modules", 0, NULL, &proc_modules_operations);
61203+#endif
61204+#else
61205+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61206+#endif
61207 return 0;
61208 }
61209 module_init(proc_modules_init);
61210@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
61211 {
61212 struct module *mod;
61213
61214- if (addr < module_addr_min || addr > module_addr_max)
61215+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61216+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
61217 return NULL;
61218
61219 list_for_each_entry_rcu(mod, &modules, list)
61220- if (within_module_core(addr, mod)
61221- || within_module_init(addr, mod))
61222+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
61223 return mod;
61224 return NULL;
61225 }
61226@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
61227 */
61228 struct module *__module_text_address(unsigned long addr)
61229 {
61230- struct module *mod = __module_address(addr);
61231+ struct module *mod;
61232+
61233+#ifdef CONFIG_X86_32
61234+ addr = ktla_ktva(addr);
61235+#endif
61236+
61237+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61238+ return NULL;
61239+
61240+ mod = __module_address(addr);
61241+
61242 if (mod) {
61243 /* Make sure it's within the text section. */
61244- if (!within(addr, mod->module_init, mod->init_text_size)
61245- && !within(addr, mod->module_core, mod->core_text_size))
61246+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61247 mod = NULL;
61248 }
61249 return mod;
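
The module.c hunks above split every module allocation into separate RW and RX regions: a section marked SHF_WRITE (or not SHF_ALLOC at all) is copied into module_core_rw/module_init_rw, everything else into the RX counterparts, and only the RX copies are later flushed from the icache. Below is a minimal userspace sketch of that placement rule only; the struct, the helper and its use of the standard ELF flag values are stand-ins for the real Elf_Shdr/struct module code, not kernel API.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Standard ELF section flag values, as tested in the hunk above. */
#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct fake_module {
    char *core_rw;   /* writable data goes here                 */
    char *core_rx;   /* read-only and executable code goes here */
};

/* Placement rule from the patch: writable (or non-allocated) sections land
 * in the RW block, everything else in the RX block. */
static char *pick_dest(struct fake_module *mod, uint64_t sh_flags, size_t off)
{
    if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
        return mod->core_rw + off;
    return mod->core_rx + off;
}

int main(void)
{
    static char rw[64], rx[64];
    struct fake_module m = { .core_rw = rw, .core_rx = rx };

    printf(".data -> %s\n", pick_dest(&m, SHF_WRITE | SHF_ALLOC, 0) == rw ? "RW" : "RX");
    printf(".text -> %s\n", pick_dest(&m, SHF_ALLOC | SHF_EXECINSTR, 0) == rw ? "RW" : "RX");
    return 0;
}
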
61250diff -urNp linux-2.6.32.42/kernel/mutex.c linux-2.6.32.42/kernel/mutex.c
61251--- linux-2.6.32.42/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
61252+++ linux-2.6.32.42/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
61253@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
61254 */
61255
61256 for (;;) {
61257- struct thread_info *owner;
61258+ struct task_struct *owner;
61259
61260 /*
61261 * If we own the BKL, then don't spin. The owner of
61262@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
61263 spin_lock_mutex(&lock->wait_lock, flags);
61264
61265 debug_mutex_lock_common(lock, &waiter);
61266- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61267+ debug_mutex_add_waiter(lock, &waiter, task);
61268
61269 /* add waiting tasks to the end of the waitqueue (FIFO): */
61270 list_add_tail(&waiter.list, &lock->wait_list);
61271@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
61272 * TASK_UNINTERRUPTIBLE case.)
61273 */
61274 if (unlikely(signal_pending_state(state, task))) {
61275- mutex_remove_waiter(lock, &waiter,
61276- task_thread_info(task));
61277+ mutex_remove_waiter(lock, &waiter, task);
61278 mutex_release(&lock->dep_map, 1, ip);
61279 spin_unlock_mutex(&lock->wait_lock, flags);
61280
61281@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
61282 done:
61283 lock_acquired(&lock->dep_map, ip);
61284 /* got the lock - rejoice! */
61285- mutex_remove_waiter(lock, &waiter, current_thread_info());
61286+ mutex_remove_waiter(lock, &waiter, task);
61287 mutex_set_owner(lock);
61288
61289 /* set it to 0 if there are no waiters left: */
61290diff -urNp linux-2.6.32.42/kernel/mutex-debug.c linux-2.6.32.42/kernel/mutex-debug.c
61291--- linux-2.6.32.42/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
61292+++ linux-2.6.32.42/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
61293@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61294 }
61295
61296 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61297- struct thread_info *ti)
61298+ struct task_struct *task)
61299 {
61300 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61301
61302 /* Mark the current thread as blocked on the lock: */
61303- ti->task->blocked_on = waiter;
61304+ task->blocked_on = waiter;
61305 }
61306
61307 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61308- struct thread_info *ti)
61309+ struct task_struct *task)
61310 {
61311 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61312- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61313- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61314- ti->task->blocked_on = NULL;
61315+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
61316+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61317+ task->blocked_on = NULL;
61318
61319 list_del_init(&waiter->list);
61320 waiter->task = NULL;
61321@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
61322 return;
61323
61324 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
61325- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
61326+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
61327 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
61328 mutex_clear_owner(lock);
61329 }
61330diff -urNp linux-2.6.32.42/kernel/mutex-debug.h linux-2.6.32.42/kernel/mutex-debug.h
61331--- linux-2.6.32.42/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
61332+++ linux-2.6.32.42/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
61333@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
61334 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61335 extern void debug_mutex_add_waiter(struct mutex *lock,
61336 struct mutex_waiter *waiter,
61337- struct thread_info *ti);
61338+ struct task_struct *task);
61339 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61340- struct thread_info *ti);
61341+ struct task_struct *task);
61342 extern void debug_mutex_unlock(struct mutex *lock);
61343 extern void debug_mutex_init(struct mutex *lock, const char *name,
61344 struct lock_class_key *key);
61345
61346 static inline void mutex_set_owner(struct mutex *lock)
61347 {
61348- lock->owner = current_thread_info();
61349+ lock->owner = current;
61350 }
61351
61352 static inline void mutex_clear_owner(struct mutex *lock)
61353diff -urNp linux-2.6.32.42/kernel/mutex.h linux-2.6.32.42/kernel/mutex.h
61354--- linux-2.6.32.42/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
61355+++ linux-2.6.32.42/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
61356@@ -19,7 +19,7 @@
61357 #ifdef CONFIG_SMP
61358 static inline void mutex_set_owner(struct mutex *lock)
61359 {
61360- lock->owner = current_thread_info();
61361+ lock->owner = current;
61362 }
61363
61364 static inline void mutex_clear_owner(struct mutex *lock)
61365diff -urNp linux-2.6.32.42/kernel/panic.c linux-2.6.32.42/kernel/panic.c
61366--- linux-2.6.32.42/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
61367+++ linux-2.6.32.42/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
61368@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
61369 const char *board;
61370
61371 printk(KERN_WARNING "------------[ cut here ]------------\n");
61372- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61373+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61374 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61375 if (board)
61376 printk(KERN_WARNING "Hardware name: %s\n", board);
61377@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61378 */
61379 void __stack_chk_fail(void)
61380 {
61381- panic("stack-protector: Kernel stack is corrupted in: %p\n",
61382+ dump_stack();
61383+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61384 __builtin_return_address(0));
61385 }
61386 EXPORT_SYMBOL(__stack_chk_fail);
61387diff -urNp linux-2.6.32.42/kernel/params.c linux-2.6.32.42/kernel/params.c
61388--- linux-2.6.32.42/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
61389+++ linux-2.6.32.42/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
61390@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
61391 return ret;
61392 }
61393
61394-static struct sysfs_ops module_sysfs_ops = {
61395+static const struct sysfs_ops module_sysfs_ops = {
61396 .show = module_attr_show,
61397 .store = module_attr_store,
61398 };
61399@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
61400 return 0;
61401 }
61402
61403-static struct kset_uevent_ops module_uevent_ops = {
61404+static const struct kset_uevent_ops module_uevent_ops = {
61405 .filter = uevent_filter,
61406 };
61407
61408diff -urNp linux-2.6.32.42/kernel/perf_event.c linux-2.6.32.42/kernel/perf_event.c
61409--- linux-2.6.32.42/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
61410+++ linux-2.6.32.42/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
61411@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
61412 */
61413 int sysctl_perf_event_sample_rate __read_mostly = 100000;
61414
61415-static atomic64_t perf_event_id;
61416+static atomic64_unchecked_t perf_event_id;
61417
61418 /*
61419 * Lock for (sysadmin-configurable) event reservations:
61420@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
61421 * In order to keep per-task stats reliable we need to flip the event
61422 * values when we flip the contexts.
61423 */
61424- value = atomic64_read(&next_event->count);
61425- value = atomic64_xchg(&event->count, value);
61426- atomic64_set(&next_event->count, value);
61427+ value = atomic64_read_unchecked(&next_event->count);
61428+ value = atomic64_xchg_unchecked(&event->count, value);
61429+ atomic64_set_unchecked(&next_event->count, value);
61430
61431 swap(event->total_time_enabled, next_event->total_time_enabled);
61432 swap(event->total_time_running, next_event->total_time_running);
61433@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
61434 update_event_times(event);
61435 }
61436
61437- return atomic64_read(&event->count);
61438+ return atomic64_read_unchecked(&event->count);
61439 }
61440
61441 /*
61442@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
61443 values[n++] = 1 + leader->nr_siblings;
61444 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61445 values[n++] = leader->total_time_enabled +
61446- atomic64_read(&leader->child_total_time_enabled);
61447+ atomic64_read_unchecked(&leader->child_total_time_enabled);
61448 }
61449 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61450 values[n++] = leader->total_time_running +
61451- atomic64_read(&leader->child_total_time_running);
61452+ atomic64_read_unchecked(&leader->child_total_time_running);
61453 }
61454
61455 size = n * sizeof(u64);
61456@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
61457 values[n++] = perf_event_read_value(event);
61458 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61459 values[n++] = event->total_time_enabled +
61460- atomic64_read(&event->child_total_time_enabled);
61461+ atomic64_read_unchecked(&event->child_total_time_enabled);
61462 }
61463 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61464 values[n++] = event->total_time_running +
61465- atomic64_read(&event->child_total_time_running);
61466+ atomic64_read_unchecked(&event->child_total_time_running);
61467 }
61468 if (read_format & PERF_FORMAT_ID)
61469 values[n++] = primary_event_id(event);
61470@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
61471 static void perf_event_reset(struct perf_event *event)
61472 {
61473 (void)perf_event_read(event);
61474- atomic64_set(&event->count, 0);
61475+ atomic64_set_unchecked(&event->count, 0);
61476 perf_event_update_userpage(event);
61477 }
61478
61479@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
61480 ++userpg->lock;
61481 barrier();
61482 userpg->index = perf_event_index(event);
61483- userpg->offset = atomic64_read(&event->count);
61484+ userpg->offset = atomic64_read_unchecked(&event->count);
61485 if (event->state == PERF_EVENT_STATE_ACTIVE)
61486- userpg->offset -= atomic64_read(&event->hw.prev_count);
61487+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
61488
61489 userpg->time_enabled = event->total_time_enabled +
61490- atomic64_read(&event->child_total_time_enabled);
61491+ atomic64_read_unchecked(&event->child_total_time_enabled);
61492
61493 userpg->time_running = event->total_time_running +
61494- atomic64_read(&event->child_total_time_running);
61495+ atomic64_read_unchecked(&event->child_total_time_running);
61496
61497 barrier();
61498 ++userpg->lock;
61499@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
61500 u64 values[4];
61501 int n = 0;
61502
61503- values[n++] = atomic64_read(&event->count);
61504+ values[n++] = atomic64_read_unchecked(&event->count);
61505 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61506 values[n++] = event->total_time_enabled +
61507- atomic64_read(&event->child_total_time_enabled);
61508+ atomic64_read_unchecked(&event->child_total_time_enabled);
61509 }
61510 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61511 values[n++] = event->total_time_running +
61512- atomic64_read(&event->child_total_time_running);
61513+ atomic64_read_unchecked(&event->child_total_time_running);
61514 }
61515 if (read_format & PERF_FORMAT_ID)
61516 values[n++] = primary_event_id(event);
61517@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
61518 if (leader != event)
61519 leader->pmu->read(leader);
61520
61521- values[n++] = atomic64_read(&leader->count);
61522+ values[n++] = atomic64_read_unchecked(&leader->count);
61523 if (read_format & PERF_FORMAT_ID)
61524 values[n++] = primary_event_id(leader);
61525
61526@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
61527 if (sub != event)
61528 sub->pmu->read(sub);
61529
61530- values[n++] = atomic64_read(&sub->count);
61531+ values[n++] = atomic64_read_unchecked(&sub->count);
61532 if (read_format & PERF_FORMAT_ID)
61533 values[n++] = primary_event_id(sub);
61534
61535@@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
61536 {
61537 struct hw_perf_event *hwc = &event->hw;
61538
61539- atomic64_add(nr, &event->count);
61540+ atomic64_add_unchecked(nr, &event->count);
61541
61542 if (!hwc->sample_period)
61543 return;
61544@@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
61545 u64 now;
61546
61547 now = cpu_clock(cpu);
61548- prev = atomic64_read(&event->hw.prev_count);
61549- atomic64_set(&event->hw.prev_count, now);
61550- atomic64_add(now - prev, &event->count);
61551+ prev = atomic64_read_unchecked(&event->hw.prev_count);
61552+ atomic64_set_unchecked(&event->hw.prev_count, now);
61553+ atomic64_add_unchecked(now - prev, &event->count);
61554 }
61555
61556 static int cpu_clock_perf_event_enable(struct perf_event *event)
61557@@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
61558 struct hw_perf_event *hwc = &event->hw;
61559 int cpu = raw_smp_processor_id();
61560
61561- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
61562+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
61563 perf_swevent_start_hrtimer(event);
61564
61565 return 0;
61566@@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
61567 u64 prev;
61568 s64 delta;
61569
61570- prev = atomic64_xchg(&event->hw.prev_count, now);
61571+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
61572 delta = now - prev;
61573- atomic64_add(delta, &event->count);
61574+ atomic64_add_unchecked(delta, &event->count);
61575 }
61576
61577 static int task_clock_perf_event_enable(struct perf_event *event)
61578@@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
61579
61580 now = event->ctx->time;
61581
61582- atomic64_set(&hwc->prev_count, now);
61583+ atomic64_set_unchecked(&hwc->prev_count, now);
61584
61585 perf_swevent_start_hrtimer(event);
61586
61587@@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
61588 event->parent = parent_event;
61589
61590 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61591- event->id = atomic64_inc_return(&perf_event_id);
61592+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
61593
61594 event->state = PERF_EVENT_STATE_INACTIVE;
61595
61596@@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
61597 if (child_event->attr.inherit_stat)
61598 perf_event_read_event(child_event, child);
61599
61600- child_val = atomic64_read(&child_event->count);
61601+ child_val = atomic64_read_unchecked(&child_event->count);
61602
61603 /*
61604 * Add back the child's count to the parent's count:
61605 */
61606- atomic64_add(child_val, &parent_event->count);
61607- atomic64_add(child_event->total_time_enabled,
61608+ atomic64_add_unchecked(child_val, &parent_event->count);
61609+ atomic64_add_unchecked(child_event->total_time_enabled,
61610 &parent_event->child_total_time_enabled);
61611- atomic64_add(child_event->total_time_running,
61612+ atomic64_add_unchecked(child_event->total_time_running,
61613 &parent_event->child_total_time_running);
61614
61615 /*
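
The perf_event.c hunks above switch the event counters from atomic64_t to the patch's atomic64_unchecked_t flavour, which, as used throughout this patch, opts those counters out of the overflow instrumentation applied to ordinary reference counts: pure statistics counters may grow or wrap without tripping it. The sketch below is only a userspace model of that checked/unchecked split, with placeholder names, no real atomicity and no compiler plugin; it is not the kernel or PaX implementation.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the checked vs. unchecked counter split seen above.
 * The real atomic64_*() / atomic64_*_unchecked() primitives are atomic and
 * the overflow check is injected at build time; here the check is just an
 * explicit trap and nothing is actually atomic. */
typedef struct { int64_t v; } model_atomic64_t;
typedef struct { int64_t v; } model_atomic64_unchecked_t;

static void model_atomic64_add(int64_t n, model_atomic64_t *a)
{
    int64_t res;
    if (__builtin_add_overflow(a->v, n, &res))   /* checked: trap on overflow */
        __builtin_trap();
    a->v = res;
}

static void model_atomic64_add_unchecked(int64_t n, model_atomic64_unchecked_t *a)
{
    a->v += n;                                   /* unchecked: plain wrap-around */
}

int main(void)
{
    model_atomic64_unchecked_t count = { 0 };    /* e.g. an event counter */
    model_atomic64_add_unchecked(42, &count);

    model_atomic64_t ref = { 0 };                /* e.g. a reference count */
    model_atomic64_add(1, &ref);

    printf("count=%lld ref=%lld\n", (long long)count.v, (long long)ref.v);
    return 0;
}
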
61616diff -urNp linux-2.6.32.42/kernel/pid.c linux-2.6.32.42/kernel/pid.c
61617--- linux-2.6.32.42/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
61618+++ linux-2.6.32.42/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
61619@@ -33,6 +33,7 @@
61620 #include <linux/rculist.h>
61621 #include <linux/bootmem.h>
61622 #include <linux/hash.h>
61623+#include <linux/security.h>
61624 #include <linux/pid_namespace.h>
61625 #include <linux/init_task.h>
61626 #include <linux/syscalls.h>
61627@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61628
61629 int pid_max = PID_MAX_DEFAULT;
61630
61631-#define RESERVED_PIDS 300
61632+#define RESERVED_PIDS 500
61633
61634 int pid_max_min = RESERVED_PIDS + 1;
61635 int pid_max_max = PID_MAX_LIMIT;
61636@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
61637 */
61638 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61639 {
61640- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61641+ struct task_struct *task;
61642+
61643+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61644+
61645+ if (gr_pid_is_chrooted(task))
61646+ return NULL;
61647+
61648+ return task;
61649 }
61650
61651 struct task_struct *find_task_by_vpid(pid_t vnr)
61652diff -urNp linux-2.6.32.42/kernel/posix-cpu-timers.c linux-2.6.32.42/kernel/posix-cpu-timers.c
61653--- linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
61654+++ linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
61655@@ -6,6 +6,7 @@
61656 #include <linux/posix-timers.h>
61657 #include <linux/errno.h>
61658 #include <linux/math64.h>
61659+#include <linux/security.h>
61660 #include <asm/uaccess.h>
61661 #include <linux/kernel_stat.h>
61662 #include <trace/events/timer.h>
61663diff -urNp linux-2.6.32.42/kernel/posix-timers.c linux-2.6.32.42/kernel/posix-timers.c
61664--- linux-2.6.32.42/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
61665+++ linux-2.6.32.42/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
61666@@ -42,6 +42,7 @@
61667 #include <linux/compiler.h>
61668 #include <linux/idr.h>
61669 #include <linux/posix-timers.h>
61670+#include <linux/grsecurity.h>
61671 #include <linux/syscalls.h>
61672 #include <linux/wait.h>
61673 #include <linux/workqueue.h>
61674@@ -296,6 +297,8 @@ static __init int init_posix_timers(void
61675 .nsleep = no_nsleep,
61676 };
61677
61678+ pax_track_stack();
61679+
61680 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
61681 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
61682 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61683@@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61684 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61685 return -EFAULT;
61686
61687+ /* only the CLOCK_REALTIME clock can be set, all other clocks
61688+ have their clock_set fptr set to a nosettime dummy function
61689+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61690+ call common_clock_set, which calls do_sys_settimeofday, which
61691+ we hook
61692+ */
61693+
61694 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
61695 }
61696
61697diff -urNp linux-2.6.32.42/kernel/power/hibernate.c linux-2.6.32.42/kernel/power/hibernate.c
61698--- linux-2.6.32.42/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61699+++ linux-2.6.32.42/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61700@@ -48,14 +48,14 @@ enum {
61701
61702 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61703
61704-static struct platform_hibernation_ops *hibernation_ops;
61705+static const struct platform_hibernation_ops *hibernation_ops;
61706
61707 /**
61708 * hibernation_set_ops - set the global hibernate operations
61709 * @ops: the hibernation operations to use in subsequent hibernation transitions
61710 */
61711
61712-void hibernation_set_ops(struct platform_hibernation_ops *ops)
61713+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61714 {
61715 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61716 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61717diff -urNp linux-2.6.32.42/kernel/power/poweroff.c linux-2.6.32.42/kernel/power/poweroff.c
61718--- linux-2.6.32.42/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61719+++ linux-2.6.32.42/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61720@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61721 .enable_mask = SYSRQ_ENABLE_BOOT,
61722 };
61723
61724-static int pm_sysrq_init(void)
61725+static int __init pm_sysrq_init(void)
61726 {
61727 register_sysrq_key('o', &sysrq_poweroff_op);
61728 return 0;
61729diff -urNp linux-2.6.32.42/kernel/power/process.c linux-2.6.32.42/kernel/power/process.c
61730--- linux-2.6.32.42/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61731+++ linux-2.6.32.42/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61732@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61733 struct timeval start, end;
61734 u64 elapsed_csecs64;
61735 unsigned int elapsed_csecs;
61736+ bool timedout = false;
61737
61738 do_gettimeofday(&start);
61739
61740 end_time = jiffies + TIMEOUT;
61741 do {
61742 todo = 0;
61743+ if (time_after(jiffies, end_time))
61744+ timedout = true;
61745 read_lock(&tasklist_lock);
61746 do_each_thread(g, p) {
61747 if (frozen(p) || !freezeable(p))
61748@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61749 * It is "frozen enough". If the task does wake
61750 * up, it will immediately call try_to_freeze.
61751 */
61752- if (!task_is_stopped_or_traced(p) &&
61753- !freezer_should_skip(p))
61754+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61755 todo++;
61756+ if (timedout) {
61757+ printk(KERN_ERR "Task refusing to freeze:\n");
61758+ sched_show_task(p);
61759+ }
61760+ }
61761 } while_each_thread(g, p);
61762 read_unlock(&tasklist_lock);
61763 yield(); /* Yield is okay here */
61764- if (time_after(jiffies, end_time))
61765- break;
61766- } while (todo);
61767+ } while (todo && !timedout);
61768
61769 do_gettimeofday(&end);
61770 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
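
The process.c hunk above restructures the freezer loop: instead of silently breaking out when the timeout expires, it latches a timedout flag at the top of the pass, reports every task that still refuses to freeze during that final pass, and only then stops via while (todo && !timedout). A small self-contained sketch of that control flow follows; freeze_one_task() and the one-second deadline are placeholders for the real per-task freezing and the jiffies-based timeout.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NTASKS 3

/* Placeholder: pretend task 2 never freezes. */
static bool freeze_one_task(int i) { return i != 2; }

int main(void)
{
    time_t end_time = time(NULL) + 1;        /* stand-in for jiffies + TIMEOUT */
    bool timedout = false;
    int todo;

    do {
        todo = 0;
        if (time(NULL) > end_time)
            timedout = true;                 /* latch the deadline first...      */
        for (int i = 0; i < NTASKS; i++) {
            if (!freeze_one_task(i)) {
                todo++;
                if (timedout)                /* ...then report stragglers on the
                                              * final pass before giving up      */
                    printf("Task refusing to freeze: %d\n", i);
            }
        }
        /* the real loop yields here; this sketch just spins for ~1s */
    } while (todo && !timedout);

    return todo ? 1 : 0;                     /* nonzero todo == freezing failed */
}
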
61771diff -urNp linux-2.6.32.42/kernel/power/suspend.c linux-2.6.32.42/kernel/power/suspend.c
61772--- linux-2.6.32.42/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61773+++ linux-2.6.32.42/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61774@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61775 [PM_SUSPEND_MEM] = "mem",
61776 };
61777
61778-static struct platform_suspend_ops *suspend_ops;
61779+static const struct platform_suspend_ops *suspend_ops;
61780
61781 /**
61782 * suspend_set_ops - Set the global suspend method table.
61783 * @ops: Pointer to ops structure.
61784 */
61785-void suspend_set_ops(struct platform_suspend_ops *ops)
61786+void suspend_set_ops(const struct platform_suspend_ops *ops)
61787 {
61788 mutex_lock(&pm_mutex);
61789 suspend_ops = ops;
61790diff -urNp linux-2.6.32.42/kernel/printk.c linux-2.6.32.42/kernel/printk.c
61791--- linux-2.6.32.42/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61792+++ linux-2.6.32.42/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61793@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61794 char c;
61795 int error = 0;
61796
61797+#ifdef CONFIG_GRKERNSEC_DMESG
61798+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61799+ return -EPERM;
61800+#endif
61801+
61802 error = security_syslog(type);
61803 if (error)
61804 return error;
61805diff -urNp linux-2.6.32.42/kernel/profile.c linux-2.6.32.42/kernel/profile.c
61806--- linux-2.6.32.42/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61807+++ linux-2.6.32.42/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61808@@ -39,7 +39,7 @@ struct profile_hit {
61809 /* Oprofile timer tick hook */
61810 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61811
61812-static atomic_t *prof_buffer;
61813+static atomic_unchecked_t *prof_buffer;
61814 static unsigned long prof_len, prof_shift;
61815
61816 int prof_on __read_mostly;
61817@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61818 hits[i].pc = 0;
61819 continue;
61820 }
61821- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61822+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61823 hits[i].hits = hits[i].pc = 0;
61824 }
61825 }
61826@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61827 * Add the current hit(s) and flush the write-queue out
61828 * to the global buffer:
61829 */
61830- atomic_add(nr_hits, &prof_buffer[pc]);
61831+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61832 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61833- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61834+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61835 hits[i].pc = hits[i].hits = 0;
61836 }
61837 out:
61838@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61839 if (prof_on != type || !prof_buffer)
61840 return;
61841 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61842- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61843+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61844 }
61845 #endif /* !CONFIG_SMP */
61846 EXPORT_SYMBOL_GPL(profile_hits);
61847@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61848 return -EFAULT;
61849 buf++; p++; count--; read++;
61850 }
61851- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61852+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61853 if (copy_to_user(buf, (void *)pnt, count))
61854 return -EFAULT;
61855 read += count;
61856@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61857 }
61858 #endif
61859 profile_discard_flip_buffers();
61860- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61861+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61862 return count;
61863 }
61864
61865diff -urNp linux-2.6.32.42/kernel/ptrace.c linux-2.6.32.42/kernel/ptrace.c
61866--- linux-2.6.32.42/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61867+++ linux-2.6.32.42/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61868@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61869 return ret;
61870 }
61871
61872-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61873+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61874+ unsigned int log)
61875 {
61876 const struct cred *cred = current_cred(), *tcred;
61877
61878@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61879 cred->gid != tcred->egid ||
61880 cred->gid != tcred->sgid ||
61881 cred->gid != tcred->gid) &&
61882- !capable(CAP_SYS_PTRACE)) {
61883+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61884+ (log && !capable(CAP_SYS_PTRACE)))
61885+ ) {
61886 rcu_read_unlock();
61887 return -EPERM;
61888 }
61889@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61890 smp_rmb();
61891 if (task->mm)
61892 dumpable = get_dumpable(task->mm);
61893- if (!dumpable && !capable(CAP_SYS_PTRACE))
61894+ if (!dumpable &&
61895+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61896+ (log && !capable(CAP_SYS_PTRACE))))
61897 return -EPERM;
61898
61899 return security_ptrace_access_check(task, mode);
61900@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61901 {
61902 int err;
61903 task_lock(task);
61904- err = __ptrace_may_access(task, mode);
61905+ err = __ptrace_may_access(task, mode, 0);
61906+ task_unlock(task);
61907+ return !err;
61908+}
61909+
61910+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61911+{
61912+ int err;
61913+ task_lock(task);
61914+ err = __ptrace_may_access(task, mode, 1);
61915 task_unlock(task);
61916 return !err;
61917 }
61918@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61919 goto out;
61920
61921 task_lock(task);
61922- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61923+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61924 task_unlock(task);
61925 if (retval)
61926 goto unlock_creds;
61927@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61928 goto unlock_tasklist;
61929
61930 task->ptrace = PT_PTRACED;
61931- if (capable(CAP_SYS_PTRACE))
61932+ if (capable_nolog(CAP_SYS_PTRACE))
61933 task->ptrace |= PT_PTRACE_CAP;
61934
61935 __ptrace_link(task, current);
61936@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61937 {
61938 int copied = 0;
61939
61940+ pax_track_stack();
61941+
61942 while (len > 0) {
61943 char buf[128];
61944 int this_len, retval;
61945@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61946 {
61947 int copied = 0;
61948
61949+ pax_track_stack();
61950+
61951 while (len > 0) {
61952 char buf[128];
61953 int this_len, retval;
61954@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61955 int ret = -EIO;
61956 siginfo_t siginfo;
61957
61958+ pax_track_stack();
61959+
61960 switch (request) {
61961 case PTRACE_PEEKTEXT:
61962 case PTRACE_PEEKDATA:
61963@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61964 ret = ptrace_setoptions(child, data);
61965 break;
61966 case PTRACE_GETEVENTMSG:
61967- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61968+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61969 break;
61970
61971 case PTRACE_GETSIGINFO:
61972 ret = ptrace_getsiginfo(child, &siginfo);
61973 if (!ret)
61974- ret = copy_siginfo_to_user((siginfo_t __user *) data,
61975+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61976 &siginfo);
61977 break;
61978
61979 case PTRACE_SETSIGINFO:
61980- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61981+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61982 sizeof siginfo))
61983 ret = -EFAULT;
61984 else
61985@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61986 goto out;
61987 }
61988
61989+ if (gr_handle_ptrace(child, request)) {
61990+ ret = -EPERM;
61991+ goto out_put_task_struct;
61992+ }
61993+
61994 if (request == PTRACE_ATTACH) {
61995 ret = ptrace_attach(child);
61996 /*
61997 * Some architectures need to do book-keeping after
61998 * a ptrace attach.
61999 */
62000- if (!ret)
62001+ if (!ret) {
62002 arch_ptrace_attach(child);
62003+ gr_audit_ptrace(child);
62004+ }
62005 goto out_put_task_struct;
62006 }
62007
62008@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
62009 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
62010 if (copied != sizeof(tmp))
62011 return -EIO;
62012- return put_user(tmp, (unsigned long __user *)data);
62013+ return put_user(tmp, (__force unsigned long __user *)data);
62014 }
62015
62016 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
62017@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
62018 siginfo_t siginfo;
62019 int ret;
62020
62021+ pax_track_stack();
62022+
62023 switch (request) {
62024 case PTRACE_PEEKTEXT:
62025 case PTRACE_PEEKDATA:
62026@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
62027 goto out;
62028 }
62029
62030+ if (gr_handle_ptrace(child, request)) {
62031+ ret = -EPERM;
62032+ goto out_put_task_struct;
62033+ }
62034+
62035 if (request == PTRACE_ATTACH) {
62036 ret = ptrace_attach(child);
62037 /*
62038 * Some architectures need to do book-keeping after
62039 * a ptrace attach.
62040 */
62041- if (!ret)
62042+ if (!ret) {
62043 arch_ptrace_attach(child);
62044+ gr_audit_ptrace(child);
62045+ }
62046 goto out_put_task_struct;
62047 }
62048
62049diff -urNp linux-2.6.32.42/kernel/rcutorture.c linux-2.6.32.42/kernel/rcutorture.c
62050--- linux-2.6.32.42/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
62051+++ linux-2.6.32.42/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
62052@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
62053 { 0 };
62054 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
62055 { 0 };
62056-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62057-static atomic_t n_rcu_torture_alloc;
62058-static atomic_t n_rcu_torture_alloc_fail;
62059-static atomic_t n_rcu_torture_free;
62060-static atomic_t n_rcu_torture_mberror;
62061-static atomic_t n_rcu_torture_error;
62062+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62063+static atomic_unchecked_t n_rcu_torture_alloc;
62064+static atomic_unchecked_t n_rcu_torture_alloc_fail;
62065+static atomic_unchecked_t n_rcu_torture_free;
62066+static atomic_unchecked_t n_rcu_torture_mberror;
62067+static atomic_unchecked_t n_rcu_torture_error;
62068 static long n_rcu_torture_timers;
62069 static struct list_head rcu_torture_removed;
62070 static cpumask_var_t shuffle_tmp_mask;
62071@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
62072
62073 spin_lock_bh(&rcu_torture_lock);
62074 if (list_empty(&rcu_torture_freelist)) {
62075- atomic_inc(&n_rcu_torture_alloc_fail);
62076+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
62077 spin_unlock_bh(&rcu_torture_lock);
62078 return NULL;
62079 }
62080- atomic_inc(&n_rcu_torture_alloc);
62081+ atomic_inc_unchecked(&n_rcu_torture_alloc);
62082 p = rcu_torture_freelist.next;
62083 list_del_init(p);
62084 spin_unlock_bh(&rcu_torture_lock);
62085@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
62086 static void
62087 rcu_torture_free(struct rcu_torture *p)
62088 {
62089- atomic_inc(&n_rcu_torture_free);
62090+ atomic_inc_unchecked(&n_rcu_torture_free);
62091 spin_lock_bh(&rcu_torture_lock);
62092 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
62093 spin_unlock_bh(&rcu_torture_lock);
62094@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
62095 i = rp->rtort_pipe_count;
62096 if (i > RCU_TORTURE_PIPE_LEN)
62097 i = RCU_TORTURE_PIPE_LEN;
62098- atomic_inc(&rcu_torture_wcount[i]);
62099+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62100 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62101 rp->rtort_mbtest = 0;
62102 rcu_torture_free(rp);
62103@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
62104 i = rp->rtort_pipe_count;
62105 if (i > RCU_TORTURE_PIPE_LEN)
62106 i = RCU_TORTURE_PIPE_LEN;
62107- atomic_inc(&rcu_torture_wcount[i]);
62108+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62109 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62110 rp->rtort_mbtest = 0;
62111 list_del(&rp->rtort_free);
62112@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
62113 i = old_rp->rtort_pipe_count;
62114 if (i > RCU_TORTURE_PIPE_LEN)
62115 i = RCU_TORTURE_PIPE_LEN;
62116- atomic_inc(&rcu_torture_wcount[i]);
62117+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62118 old_rp->rtort_pipe_count++;
62119 cur_ops->deferred_free(old_rp);
62120 }
62121@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
62122 return;
62123 }
62124 if (p->rtort_mbtest == 0)
62125- atomic_inc(&n_rcu_torture_mberror);
62126+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62127 spin_lock(&rand_lock);
62128 cur_ops->read_delay(&rand);
62129 n_rcu_torture_timers++;
62130@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
62131 continue;
62132 }
62133 if (p->rtort_mbtest == 0)
62134- atomic_inc(&n_rcu_torture_mberror);
62135+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62136 cur_ops->read_delay(&rand);
62137 preempt_disable();
62138 pipe_count = p->rtort_pipe_count;
62139@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
62140 rcu_torture_current,
62141 rcu_torture_current_version,
62142 list_empty(&rcu_torture_freelist),
62143- atomic_read(&n_rcu_torture_alloc),
62144- atomic_read(&n_rcu_torture_alloc_fail),
62145- atomic_read(&n_rcu_torture_free),
62146- atomic_read(&n_rcu_torture_mberror),
62147+ atomic_read_unchecked(&n_rcu_torture_alloc),
62148+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
62149+ atomic_read_unchecked(&n_rcu_torture_free),
62150+ atomic_read_unchecked(&n_rcu_torture_mberror),
62151 n_rcu_torture_timers);
62152- if (atomic_read(&n_rcu_torture_mberror) != 0)
62153+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
62154 cnt += sprintf(&page[cnt], " !!!");
62155 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
62156 if (i > 1) {
62157 cnt += sprintf(&page[cnt], "!!! ");
62158- atomic_inc(&n_rcu_torture_error);
62159+ atomic_inc_unchecked(&n_rcu_torture_error);
62160 WARN_ON_ONCE(1);
62161 }
62162 cnt += sprintf(&page[cnt], "Reader Pipe: ");
62163@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
62164 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
62165 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62166 cnt += sprintf(&page[cnt], " %d",
62167- atomic_read(&rcu_torture_wcount[i]));
62168+ atomic_read_unchecked(&rcu_torture_wcount[i]));
62169 }
62170 cnt += sprintf(&page[cnt], "\n");
62171 if (cur_ops->stats)
62172@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
62173
62174 if (cur_ops->cleanup)
62175 cur_ops->cleanup();
62176- if (atomic_read(&n_rcu_torture_error))
62177+ if (atomic_read_unchecked(&n_rcu_torture_error))
62178 rcu_torture_print_module_parms("End of test: FAILURE");
62179 else
62180 rcu_torture_print_module_parms("End of test: SUCCESS");
62181@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
62182
62183 rcu_torture_current = NULL;
62184 rcu_torture_current_version = 0;
62185- atomic_set(&n_rcu_torture_alloc, 0);
62186- atomic_set(&n_rcu_torture_alloc_fail, 0);
62187- atomic_set(&n_rcu_torture_free, 0);
62188- atomic_set(&n_rcu_torture_mberror, 0);
62189- atomic_set(&n_rcu_torture_error, 0);
62190+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62191+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62192+ atomic_set_unchecked(&n_rcu_torture_free, 0);
62193+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62194+ atomic_set_unchecked(&n_rcu_torture_error, 0);
62195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62196- atomic_set(&rcu_torture_wcount[i], 0);
62197+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62198 for_each_possible_cpu(cpu) {
62199 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62200 per_cpu(rcu_torture_count, cpu)[i] = 0;
62201diff -urNp linux-2.6.32.42/kernel/rcutree.c linux-2.6.32.42/kernel/rcutree.c
62202--- linux-2.6.32.42/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
62203+++ linux-2.6.32.42/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
62204@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
62205 /*
62206 * Do softirq processing for the current CPU.
62207 */
62208-static void rcu_process_callbacks(struct softirq_action *unused)
62209+static void rcu_process_callbacks(void)
62210 {
62211 /*
62212 * Memory references from any prior RCU read-side critical sections
62213diff -urNp linux-2.6.32.42/kernel/rcutree_plugin.h linux-2.6.32.42/kernel/rcutree_plugin.h
62214--- linux-2.6.32.42/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
62215+++ linux-2.6.32.42/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
62216@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
62217 */
62218 void __rcu_read_lock(void)
62219 {
62220- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
62221+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
62222 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
62223 }
62224 EXPORT_SYMBOL_GPL(__rcu_read_lock);
62225@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
62226 struct task_struct *t = current;
62227
62228 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
62229- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
62230+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
62231 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
62232 rcu_read_unlock_special(t);
62233 }
62234diff -urNp linux-2.6.32.42/kernel/relay.c linux-2.6.32.42/kernel/relay.c
62235--- linux-2.6.32.42/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
62236+++ linux-2.6.32.42/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
62237@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
62238 unsigned int flags,
62239 int *nonpad_ret)
62240 {
62241- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
62242+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
62243 struct rchan_buf *rbuf = in->private_data;
62244 unsigned int subbuf_size = rbuf->chan->subbuf_size;
62245 uint64_t pos = (uint64_t) *ppos;
62246@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
62247 .ops = &relay_pipe_buf_ops,
62248 .spd_release = relay_page_release,
62249 };
62250+ ssize_t ret;
62251+
62252+ pax_track_stack();
62253
62254 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62255 return 0;
62256diff -urNp linux-2.6.32.42/kernel/resource.c linux-2.6.32.42/kernel/resource.c
62257--- linux-2.6.32.42/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
62258+++ linux-2.6.32.42/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
62259@@ -132,8 +132,18 @@ static const struct file_operations proc
62260
62261 static int __init ioresources_init(void)
62262 {
62263+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62264+#ifdef CONFIG_GRKERNSEC_PROC_USER
62265+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62266+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62267+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62268+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62269+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62270+#endif
62271+#else
62272 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62273 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62274+#endif
62275 return 0;
62276 }
62277 __initcall(ioresources_init);
62278diff -urNp linux-2.6.32.42/kernel/rtmutex.c linux-2.6.32.42/kernel/rtmutex.c
62279--- linux-2.6.32.42/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
62280+++ linux-2.6.32.42/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
62281@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
62282 */
62283 spin_lock_irqsave(&pendowner->pi_lock, flags);
62284
62285- WARN_ON(!pendowner->pi_blocked_on);
62286+ BUG_ON(!pendowner->pi_blocked_on);
62287 WARN_ON(pendowner->pi_blocked_on != waiter);
62288 WARN_ON(pendowner->pi_blocked_on->lock != lock);
62289
62290diff -urNp linux-2.6.32.42/kernel/rtmutex-tester.c linux-2.6.32.42/kernel/rtmutex-tester.c
62291--- linux-2.6.32.42/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
62292+++ linux-2.6.32.42/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
62293@@ -21,7 +21,7 @@
62294 #define MAX_RT_TEST_MUTEXES 8
62295
62296 static spinlock_t rttest_lock;
62297-static atomic_t rttest_event;
62298+static atomic_unchecked_t rttest_event;
62299
62300 struct test_thread_data {
62301 int opcode;
62302@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
62303
62304 case RTTEST_LOCKCONT:
62305 td->mutexes[td->opdata] = 1;
62306- td->event = atomic_add_return(1, &rttest_event);
62307+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62308 return 0;
62309
62310 case RTTEST_RESET:
62311@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
62312 return 0;
62313
62314 case RTTEST_RESETEVENT:
62315- atomic_set(&rttest_event, 0);
62316+ atomic_set_unchecked(&rttest_event, 0);
62317 return 0;
62318
62319 default:
62320@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
62321 return ret;
62322
62323 td->mutexes[id] = 1;
62324- td->event = atomic_add_return(1, &rttest_event);
62325+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62326 rt_mutex_lock(&mutexes[id]);
62327- td->event = atomic_add_return(1, &rttest_event);
62328+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62329 td->mutexes[id] = 4;
62330 return 0;
62331
62332@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
62333 return ret;
62334
62335 td->mutexes[id] = 1;
62336- td->event = atomic_add_return(1, &rttest_event);
62337+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62338 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62339- td->event = atomic_add_return(1, &rttest_event);
62340+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62341 td->mutexes[id] = ret ? 0 : 4;
62342 return ret ? -EINTR : 0;
62343
62344@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
62345 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62346 return ret;
62347
62348- td->event = atomic_add_return(1, &rttest_event);
62349+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62350 rt_mutex_unlock(&mutexes[id]);
62351- td->event = atomic_add_return(1, &rttest_event);
62352+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62353 td->mutexes[id] = 0;
62354 return 0;
62355
62356@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
62357 break;
62358
62359 td->mutexes[dat] = 2;
62360- td->event = atomic_add_return(1, &rttest_event);
62361+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62362 break;
62363
62364 case RTTEST_LOCKBKL:
62365@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
62366 return;
62367
62368 td->mutexes[dat] = 3;
62369- td->event = atomic_add_return(1, &rttest_event);
62370+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62371 break;
62372
62373 case RTTEST_LOCKNOWAIT:
62374@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
62375 return;
62376
62377 td->mutexes[dat] = 1;
62378- td->event = atomic_add_return(1, &rttest_event);
62379+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62380 return;
62381
62382 case RTTEST_LOCKBKL:
62383diff -urNp linux-2.6.32.42/kernel/sched.c linux-2.6.32.42/kernel/sched.c
62384--- linux-2.6.32.42/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
62385+++ linux-2.6.32.42/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
62386@@ -5043,7 +5043,7 @@ out:
62387 * In CONFIG_NO_HZ case, the idle load balance owner will do the
62388 * rebalancing for all the cpus for whom scheduler ticks are stopped.
62389 */
62390-static void run_rebalance_domains(struct softirq_action *h)
62391+static void run_rebalance_domains(void)
62392 {
62393 int this_cpu = smp_processor_id();
62394 struct rq *this_rq = cpu_rq(this_cpu);
62395@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
62396 struct rq *rq;
62397 int cpu;
62398
62399+ pax_track_stack();
62400+
62401 need_resched:
62402 preempt_disable();
62403 cpu = smp_processor_id();
62404@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
62405 * Look out! "owner" is an entirely speculative pointer
62406 * access and not reliable.
62407 */
62408-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
62409+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
62410 {
62411 unsigned int cpu;
62412 struct rq *rq;
62413@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
62414 * DEBUG_PAGEALLOC could have unmapped it if
62415 * the mutex owner just released it and exited.
62416 */
62417- if (probe_kernel_address(&owner->cpu, cpu))
62418+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
62419 return 0;
62420 #else
62421- cpu = owner->cpu;
62422+ cpu = task_thread_info(owner)->cpu;
62423 #endif
62424
62425 /*
62426@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
62427 /*
62428 * Is that owner really running on that cpu?
62429 */
62430- if (task_thread_info(rq->curr) != owner || need_resched())
62431+ if (rq->curr != owner || need_resched())
62432 return 0;
62433
62434 cpu_relax();
62435@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
62436 /* convert nice value [19,-20] to rlimit style value [1,40] */
62437 int nice_rlim = 20 - nice;
62438
62439+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62440+
62441 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
62442 capable(CAP_SYS_NICE));
62443 }
62444@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62445 if (nice > 19)
62446 nice = 19;
62447
62448- if (increment < 0 && !can_nice(current, nice))
62449+ if (increment < 0 && (!can_nice(current, nice) ||
62450+ gr_handle_chroot_nice()))
62451 return -EPERM;
62452
62453 retval = security_task_setnice(current, nice);
62454@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
62455 long power;
62456 int weight;
62457
62458- WARN_ON(!sd || !sd->groups);
62459+ BUG_ON(!sd || !sd->groups);
62460
62461 if (cpu != group_first_cpu(sd->groups))
62462 return;
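
The sched.c hunks above pair with the kernel/mutex*.c changes earlier in this patch: the spinning-mutex owner is now recorded as a struct task_struct * rather than a struct thread_info *, and per-thread details such as the CPU are derived on demand through task_thread_info(owner). The toy illustration below shows only that ownership representation; the structs and the embedded thread_info are simplified stand-ins, not the kernel layout, and the speculative/RCU nature of the real owner access is ignored.

#include <stdio.h>

/* Simplified stand-ins: in the real kernel, thread_info is not necessarily
 * embedded like this, and the mutex owner is read speculatively. */
struct thread_info { int cpu; };
struct task_struct { struct thread_info ti; const char *comm; };

static struct thread_info *task_thread_info(struct task_struct *t)
{
    return &t->ti;
}

struct mutex_model {
    struct task_struct *owner;   /* was: struct thread_info *owner */
};

int main(void)
{
    struct task_struct t = { .ti = { .cpu = 3 }, .comm = "worker" };
    struct mutex_model m = { .owner = &t };

    printf("owner=%s cpu=%d\n", m.owner->comm, task_thread_info(m.owner)->cpu);
    return 0;
}
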
62463diff -urNp linux-2.6.32.42/kernel/signal.c linux-2.6.32.42/kernel/signal.c
62464--- linux-2.6.32.42/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
62465+++ linux-2.6.32.42/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
62466@@ -41,12 +41,12 @@
62467
62468 static struct kmem_cache *sigqueue_cachep;
62469
62470-static void __user *sig_handler(struct task_struct *t, int sig)
62471+static __sighandler_t sig_handler(struct task_struct *t, int sig)
62472 {
62473 return t->sighand->action[sig - 1].sa.sa_handler;
62474 }
62475
62476-static int sig_handler_ignored(void __user *handler, int sig)
62477+static int sig_handler_ignored(__sighandler_t handler, int sig)
62478 {
62479 /* Is it explicitly or implicitly ignored? */
62480 return handler == SIG_IGN ||
62481@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
62482 static int sig_task_ignored(struct task_struct *t, int sig,
62483 int from_ancestor_ns)
62484 {
62485- void __user *handler;
62486+ __sighandler_t handler;
62487
62488 handler = sig_handler(t, sig);
62489
62490@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
62491 */
62492 user = get_uid(__task_cred(t)->user);
62493 atomic_inc(&user->sigpending);
62494+
62495+ if (!override_rlimit)
62496+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62497 if (override_rlimit ||
62498 atomic_read(&user->sigpending) <=
62499 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
62500@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
62501
62502 int unhandled_signal(struct task_struct *tsk, int sig)
62503 {
62504- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62505+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62506 if (is_global_init(tsk))
62507 return 1;
62508 if (handler != SIG_IGN && handler != SIG_DFL)
62509@@ -627,6 +630,9 @@ static int check_kill_permission(int sig
62510 }
62511 }
62512
62513+ if (gr_handle_signal(t, sig))
62514+ return -EPERM;
62515+
62516 return security_task_kill(t, info, sig, 0);
62517 }
62518
62519@@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
62520 return send_signal(sig, info, p, 1);
62521 }
62522
62523-static int
62524+int
62525 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62526 {
62527 return send_signal(sig, info, t, 0);
62528@@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
62529 unsigned long int flags;
62530 int ret, blocked, ignored;
62531 struct k_sigaction *action;
62532+ int is_unhandled = 0;
62533
62534 spin_lock_irqsave(&t->sighand->siglock, flags);
62535 action = &t->sighand->action[sig-1];
62536@@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
62537 }
62538 if (action->sa.sa_handler == SIG_DFL)
62539 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62540+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62541+ is_unhandled = 1;
62542 ret = specific_send_sig_info(sig, info, t);
62543 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62544
62545+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
62546+ normal operation */
62547+ if (is_unhandled) {
62548+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62549+ gr_handle_crash(t, sig);
62550+ }
62551+
62552 return ret;
62553 }
62554
62555@@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
62556 {
62557 int ret = check_kill_permission(sig, info, p);
62558
62559- if (!ret && sig)
62560+ if (!ret && sig) {
62561 ret = do_send_sig_info(sig, info, p, true);
62562+ if (!ret)
62563+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62564+ }
62565
62566 return ret;
62567 }
62568@@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
62569 {
62570 siginfo_t info;
62571
62572+ pax_track_stack();
62573+
62574 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62575
62576 memset(&info, 0, sizeof info);
62577diff -urNp linux-2.6.32.42/kernel/smp.c linux-2.6.32.42/kernel/smp.c
62578--- linux-2.6.32.42/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
62579+++ linux-2.6.32.42/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
62580@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
62581 }
62582 EXPORT_SYMBOL(smp_call_function);
62583
62584-void ipi_call_lock(void)
62585+void ipi_call_lock(void) __acquires(call_function.lock)
62586 {
62587 spin_lock(&call_function.lock);
62588 }
62589
62590-void ipi_call_unlock(void)
62591+void ipi_call_unlock(void) __releases(call_function.lock)
62592 {
62593 spin_unlock(&call_function.lock);
62594 }
62595
62596-void ipi_call_lock_irq(void)
62597+void ipi_call_lock_irq(void) __acquires(call_function.lock)
62598 {
62599 spin_lock_irq(&call_function.lock);
62600 }
62601
62602-void ipi_call_unlock_irq(void)
62603+void ipi_call_unlock_irq(void) __releases(call_function.lock)
62604 {
62605 spin_unlock_irq(&call_function.lock);
62606 }
62607diff -urNp linux-2.6.32.42/kernel/softirq.c linux-2.6.32.42/kernel/softirq.c
62608--- linux-2.6.32.42/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
62609+++ linux-2.6.32.42/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
62610@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62611
62612 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62613
62614-char *softirq_to_name[NR_SOFTIRQS] = {
62615+const char * const softirq_to_name[NR_SOFTIRQS] = {
62616 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62617 "TASKLET", "SCHED", "HRTIMER", "RCU"
62618 };
62619@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
62620
62621 asmlinkage void __do_softirq(void)
62622 {
62623- struct softirq_action *h;
62624+ const struct softirq_action *h;
62625 __u32 pending;
62626 int max_restart = MAX_SOFTIRQ_RESTART;
62627 int cpu;
62628@@ -233,7 +233,7 @@ restart:
62629 kstat_incr_softirqs_this_cpu(h - softirq_vec);
62630
62631 trace_softirq_entry(h, softirq_vec);
62632- h->action(h);
62633+ h->action();
62634 trace_softirq_exit(h, softirq_vec);
62635 if (unlikely(prev_count != preempt_count())) {
62636 printk(KERN_ERR "huh, entered softirq %td %s %p"
62637@@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
62638 local_irq_restore(flags);
62639 }
62640
62641-void open_softirq(int nr, void (*action)(struct softirq_action *))
62642+void open_softirq(int nr, void (*action)(void))
62643 {
62644 softirq_vec[nr].action = action;
62645 }
62646@@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
62647
62648 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62649
62650-static void tasklet_action(struct softirq_action *a)
62651+static void tasklet_action(void)
62652 {
62653 struct tasklet_struct *list;
62654
62655@@ -454,7 +454,7 @@ static void tasklet_action(struct softir
62656 }
62657 }
62658
62659-static void tasklet_hi_action(struct softirq_action *a)
62660+static void tasklet_hi_action(void)
62661 {
62662 struct tasklet_struct *list;
62663
62664diff -urNp linux-2.6.32.42/kernel/sys.c linux-2.6.32.42/kernel/sys.c
62665--- linux-2.6.32.42/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
62666+++ linux-2.6.32.42/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
62667@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
62668 error = -EACCES;
62669 goto out;
62670 }
62671+
62672+ if (gr_handle_chroot_setpriority(p, niceval)) {
62673+ error = -EACCES;
62674+ goto out;
62675+ }
62676+
62677 no_nice = security_task_setnice(p, niceval);
62678 if (no_nice) {
62679 error = no_nice;
62680@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
62681 !(user = find_user(who)))
62682 goto out_unlock; /* No processes for this user */
62683
62684- do_each_thread(g, p)
62685+ do_each_thread(g, p) {
62686 if (__task_cred(p)->uid == who)
62687 error = set_one_prio(p, niceval, error);
62688- while_each_thread(g, p);
62689+ } while_each_thread(g, p);
62690 if (who != cred->uid)
62691 free_uid(user); /* For find_user() */
62692 break;
62693@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
62694 !(user = find_user(who)))
62695 goto out_unlock; /* No processes for this user */
62696
62697- do_each_thread(g, p)
62698+ do_each_thread(g, p) {
62699 if (__task_cred(p)->uid == who) {
62700 niceval = 20 - task_nice(p);
62701 if (niceval > retval)
62702 retval = niceval;
62703 }
62704- while_each_thread(g, p);
62705+ } while_each_thread(g, p);
62706 if (who != cred->uid)
62707 free_uid(user); /* for find_user() */
62708 break;
62709@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62710 goto error;
62711 }
62712
62713+ if (gr_check_group_change(new->gid, new->egid, -1))
62714+ goto error;
62715+
62716 if (rgid != (gid_t) -1 ||
62717 (egid != (gid_t) -1 && egid != old->gid))
62718 new->sgid = new->egid;
62719@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62720 goto error;
62721
62722 retval = -EPERM;
62723+
62724+ if (gr_check_group_change(gid, gid, gid))
62725+ goto error;
62726+
62727 if (capable(CAP_SETGID))
62728 new->gid = new->egid = new->sgid = new->fsgid = gid;
62729 else if (gid == old->gid || gid == old->sgid)
62730@@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62731 goto error;
62732 }
62733
62734+ if (gr_check_user_change(new->uid, new->euid, -1))
62735+ goto error;
62736+
62737 if (new->uid != old->uid) {
62738 retval = set_user(new);
62739 if (retval < 0)
62740@@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62741 goto error;
62742
62743 retval = -EPERM;
62744+
62745+ if (gr_check_crash_uid(uid))
62746+ goto error;
62747+ if (gr_check_user_change(uid, uid, uid))
62748+ goto error;
62749+
62750 if (capable(CAP_SETUID)) {
62751 new->suid = new->uid = uid;
62752 if (uid != old->uid) {
62753@@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62754 goto error;
62755 }
62756
62757+ if (gr_check_user_change(ruid, euid, -1))
62758+ goto error;
62759+
62760 if (ruid != (uid_t) -1) {
62761 new->uid = ruid;
62762 if (ruid != old->uid) {
62763@@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62764 goto error;
62765 }
62766
62767+ if (gr_check_group_change(rgid, egid, -1))
62768+ goto error;
62769+
62770 if (rgid != (gid_t) -1)
62771 new->gid = rgid;
62772 if (egid != (gid_t) -1)
62773@@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62774 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62775 goto error;
62776
62777+ if (gr_check_user_change(-1, -1, uid))
62778+ goto error;
62779+
62780 if (uid == old->uid || uid == old->euid ||
62781 uid == old->suid || uid == old->fsuid ||
62782 capable(CAP_SETUID)) {
62783@@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62784 if (gid == old->gid || gid == old->egid ||
62785 gid == old->sgid || gid == old->fsgid ||
62786 capable(CAP_SETGID)) {
62787+ if (gr_check_group_change(-1, -1, gid))
62788+ goto error;
62789+
62790 if (gid != old_fsgid) {
62791 new->fsgid = gid;
62792 goto change_okay;
62793@@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62794 error = get_dumpable(me->mm);
62795 break;
62796 case PR_SET_DUMPABLE:
62797- if (arg2 < 0 || arg2 > 1) {
62798+ if (arg2 > 1) {
62799 error = -EINVAL;
62800 break;
62801 }
62802diff -urNp linux-2.6.32.42/kernel/sysctl.c linux-2.6.32.42/kernel/sysctl.c
62803--- linux-2.6.32.42/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62804+++ linux-2.6.32.42/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62805@@ -63,6 +63,13 @@
62806 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62807
62808 #if defined(CONFIG_SYSCTL)
62809+#include <linux/grsecurity.h>
62810+#include <linux/grinternal.h>
62811+
62812+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62813+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62814+ const int op);
62815+extern int gr_handle_chroot_sysctl(const int op);
62816
62817 /* External variables not in a header file. */
62818 extern int C_A_D;
62819@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62820 static int proc_taint(struct ctl_table *table, int write,
62821 void __user *buffer, size_t *lenp, loff_t *ppos);
62822 #endif
62823+extern ctl_table grsecurity_table[];
62824
62825 static struct ctl_table root_table[];
62826 static struct ctl_table_root sysctl_table_root;
62827@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62828 int sysctl_legacy_va_layout;
62829 #endif
62830
62831+#ifdef CONFIG_PAX_SOFTMODE
62832+static ctl_table pax_table[] = {
62833+ {
62834+ .ctl_name = CTL_UNNUMBERED,
62835+ .procname = "softmode",
62836+ .data = &pax_softmode,
62837+ .maxlen = sizeof(unsigned int),
62838+ .mode = 0600,
62839+ .proc_handler = &proc_dointvec,
62840+ },
62841+
62842+ { .ctl_name = 0 }
62843+};
62844+#endif
62845+
62846 extern int prove_locking;
62847 extern int lock_stat;
62848
62849@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62850 #endif
62851
62852 static struct ctl_table kern_table[] = {
62853+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62854+ {
62855+ .ctl_name = CTL_UNNUMBERED,
62856+ .procname = "grsecurity",
62857+ .mode = 0500,
62858+ .child = grsecurity_table,
62859+ },
62860+#endif
62861+
62862+#ifdef CONFIG_PAX_SOFTMODE
62863+ {
62864+ .ctl_name = CTL_UNNUMBERED,
62865+ .procname = "pax",
62866+ .mode = 0500,
62867+ .child = pax_table,
62868+ },
62869+#endif
62870+
62871 {
62872 .ctl_name = CTL_UNNUMBERED,
62873 .procname = "sched_child_runs_first",
62874@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62875 .data = &modprobe_path,
62876 .maxlen = KMOD_PATH_LEN,
62877 .mode = 0644,
62878- .proc_handler = &proc_dostring,
62879- .strategy = &sysctl_string,
62880+ .proc_handler = &proc_dostring_modpriv,
62881+ .strategy = &sysctl_string_modpriv,
62882 },
62883 {
62884 .ctl_name = CTL_UNNUMBERED,
62885@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62886 .mode = 0644,
62887 .proc_handler = &proc_dointvec
62888 },
62889+ {
62890+ .procname = "heap_stack_gap",
62891+ .data = &sysctl_heap_stack_gap,
62892+ .maxlen = sizeof(sysctl_heap_stack_gap),
62893+ .mode = 0644,
62894+ .proc_handler = proc_doulongvec_minmax,
62895+ },
62896 #else
62897 {
62898 .ctl_name = CTL_UNNUMBERED,
62899@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62900 return 0;
62901 }
62902
62903+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62904+
62905 static int parse_table(int __user *name, int nlen,
62906 void __user *oldval, size_t __user *oldlenp,
62907 void __user *newval, size_t newlen,
62908@@ -1821,7 +1871,7 @@ repeat:
62909 if (n == table->ctl_name) {
62910 int error;
62911 if (table->child) {
62912- if (sysctl_perm(root, table, MAY_EXEC))
62913+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
62914 return -EPERM;
62915 name++;
62916 nlen--;
62917@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62918 int error;
62919 int mode;
62920
62921+ if (table->parent != NULL && table->parent->procname != NULL &&
62922+ table->procname != NULL &&
62923+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62924+ return -EACCES;
62925+ if (gr_handle_chroot_sysctl(op))
62926+ return -EACCES;
62927+ error = gr_handle_sysctl(table, op);
62928+ if (error)
62929+ return error;
62930+
62931+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62932+ if (error)
62933+ return error;
62934+
62935+ if (root->permissions)
62936+ mode = root->permissions(root, current->nsproxy, table);
62937+ else
62938+ mode = table->mode;
62939+
62940+ return test_perm(mode, op);
62941+}
62942+
62943+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62944+{
62945+ int error;
62946+ int mode;
62947+
62948 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62949 if (error)
62950 return error;
62951@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62952 buffer, lenp, ppos);
62953 }
62954
62955+int proc_dostring_modpriv(struct ctl_table *table, int write,
62956+ void __user *buffer, size_t *lenp, loff_t *ppos)
62957+{
62958+ if (write && !capable(CAP_SYS_MODULE))
62959+ return -EPERM;
62960+
62961+ return _proc_do_string(table->data, table->maxlen, write,
62962+ buffer, lenp, ppos);
62963+}
62964+
62965
62966 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62967 int *valp,
62968@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62969 vleft = table->maxlen / sizeof(unsigned long);
62970 left = *lenp;
62971
62972- for (; left && vleft--; i++, min++, max++, first=0) {
62973+ for (; left && vleft--; i++, first=0) {
62974 if (write) {
62975 while (left) {
62976 char c;
62977@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62978 return -ENOSYS;
62979 }
62980
62981+int proc_dostring_modpriv(struct ctl_table *table, int write,
62982+ void __user *buffer, size_t *lenp, loff_t *ppos)
62983+{
62984+ return -ENOSYS;
62985+}
62986+
62987 int proc_dointvec(struct ctl_table *table, int write,
62988 void __user *buffer, size_t *lenp, loff_t *ppos)
62989 {
62990@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62991 return 1;
62992 }
62993
62994+int sysctl_string_modpriv(struct ctl_table *table,
62995+ void __user *oldval, size_t __user *oldlenp,
62996+ void __user *newval, size_t newlen)
62997+{
62998+ if (newval && newlen && !capable(CAP_SYS_MODULE))
62999+ return -EPERM;
63000+
63001+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
63002+}
63003+
63004 /*
63005 * This function makes sure that all of the integers in the vector
63006 * are between the minimum and maximum values given in the arrays
63007@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
63008 return -ENOSYS;
63009 }
63010
63011+int sysctl_string_modpriv(struct ctl_table *table,
63012+ void __user *oldval, size_t __user *oldlenp,
63013+ void __user *newval, size_t newlen)
63014+{
63015+ return -ENOSYS;
63016+}
63017+
63018 int sysctl_intvec(struct ctl_table *table,
63019 void __user *oldval, size_t __user *oldlenp,
63020 void __user *newval, size_t newlen)
63021@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
63022 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
63023 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
63024 EXPORT_SYMBOL(proc_dostring);
63025+EXPORT_SYMBOL(proc_dostring_modpriv);
63026 EXPORT_SYMBOL(proc_doulongvec_minmax);
63027 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
63028 EXPORT_SYMBOL(register_sysctl_table);
63029@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
63030 EXPORT_SYMBOL(sysctl_jiffies);
63031 EXPORT_SYMBOL(sysctl_ms_jiffies);
63032 EXPORT_SYMBOL(sysctl_string);
63033+EXPORT_SYMBOL(sysctl_string_modpriv);
63034 EXPORT_SYMBOL(sysctl_data);
63035 EXPORT_SYMBOL(unregister_sysctl_table);
63036diff -urNp linux-2.6.32.42/kernel/sysctl_check.c linux-2.6.32.42/kernel/sysctl_check.c
63037--- linux-2.6.32.42/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
63038+++ linux-2.6.32.42/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
63039@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
63040 } else {
63041 if ((table->strategy == sysctl_data) ||
63042 (table->strategy == sysctl_string) ||
63043+ (table->strategy == sysctl_string_modpriv) ||
63044 (table->strategy == sysctl_intvec) ||
63045 (table->strategy == sysctl_jiffies) ||
63046 (table->strategy == sysctl_ms_jiffies) ||
63047 (table->proc_handler == proc_dostring) ||
63048+ (table->proc_handler == proc_dostring_modpriv) ||
63049 (table->proc_handler == proc_dointvec) ||
63050 (table->proc_handler == proc_dointvec_minmax) ||
63051 (table->proc_handler == proc_dointvec_jiffies) ||
63052diff -urNp linux-2.6.32.42/kernel/taskstats.c linux-2.6.32.42/kernel/taskstats.c
63053--- linux-2.6.32.42/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
63054+++ linux-2.6.32.42/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
63055@@ -26,9 +26,12 @@
63056 #include <linux/cgroup.h>
63057 #include <linux/fs.h>
63058 #include <linux/file.h>
63059+#include <linux/grsecurity.h>
63060 #include <net/genetlink.h>
63061 #include <asm/atomic.h>
63062
63063+extern int gr_is_taskstats_denied(int pid);
63064+
63065 /*
63066 * Maximum length of a cpumask that can be specified in
63067 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
63068@@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
63069 size_t size;
63070 cpumask_var_t mask;
63071
63072+ if (gr_is_taskstats_denied(current->pid))
63073+ return -EACCES;
63074+
63075 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
63076 return -ENOMEM;
63077
63078diff -urNp linux-2.6.32.42/kernel/time/tick-broadcast.c linux-2.6.32.42/kernel/time/tick-broadcast.c
63079--- linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
63080+++ linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
63081@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
63082 * then clear the broadcast bit.
63083 */
63084 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
63085- int cpu = smp_processor_id();
63086+ cpu = smp_processor_id();
63087
63088 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
63089 tick_broadcast_clear_oneshot(cpu);
63090diff -urNp linux-2.6.32.42/kernel/time/timekeeping.c linux-2.6.32.42/kernel/time/timekeeping.c
63091--- linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
63092+++ linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
63093@@ -14,6 +14,7 @@
63094 #include <linux/init.h>
63095 #include <linux/mm.h>
63096 #include <linux/sched.h>
63097+#include <linux/grsecurity.h>
63098 #include <linux/sysdev.h>
63099 #include <linux/clocksource.h>
63100 #include <linux/jiffies.h>
63101@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
63102 */
63103 struct timespec ts = xtime;
63104 timespec_add_ns(&ts, nsec);
63105- ACCESS_ONCE(xtime_cache) = ts;
63106+ ACCESS_ONCE_RW(xtime_cache) = ts;
63107 }
63108
63109 /* must hold xtime_lock */
63110@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
63111 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
63112 return -EINVAL;
63113
63114+ gr_log_timechange();
63115+
63116 write_seqlock_irqsave(&xtime_lock, flags);
63117
63118 timekeeping_forward_now();
63119diff -urNp linux-2.6.32.42/kernel/time/timer_list.c linux-2.6.32.42/kernel/time/timer_list.c
63120--- linux-2.6.32.42/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
63121+++ linux-2.6.32.42/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
63122@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
63123
63124 static void print_name_offset(struct seq_file *m, void *sym)
63125 {
63126+#ifdef CONFIG_GRKERNSEC_HIDESYM
63127+ SEQ_printf(m, "<%p>", NULL);
63128+#else
63129 char symname[KSYM_NAME_LEN];
63130
63131 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
63132 SEQ_printf(m, "<%p>", sym);
63133 else
63134 SEQ_printf(m, "%s", symname);
63135+#endif
63136 }
63137
63138 static void
63139@@ -112,7 +116,11 @@ next_one:
63140 static void
63141 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
63142 {
63143+#ifdef CONFIG_GRKERNSEC_HIDESYM
63144+ SEQ_printf(m, " .base: %p\n", NULL);
63145+#else
63146 SEQ_printf(m, " .base: %p\n", base);
63147+#endif
63148 SEQ_printf(m, " .index: %d\n",
63149 base->index);
63150 SEQ_printf(m, " .resolution: %Lu nsecs\n",
63151@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
63152 {
63153 struct proc_dir_entry *pe;
63154
63155+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63156+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
63157+#else
63158 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
63159+#endif
63160 if (!pe)
63161 return -ENOMEM;
63162 return 0;
63163diff -urNp linux-2.6.32.42/kernel/time/timer_stats.c linux-2.6.32.42/kernel/time/timer_stats.c
63164--- linux-2.6.32.42/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
63165+++ linux-2.6.32.42/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
63166@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
63167 static unsigned long nr_entries;
63168 static struct entry entries[MAX_ENTRIES];
63169
63170-static atomic_t overflow_count;
63171+static atomic_unchecked_t overflow_count;
63172
63173 /*
63174 * The entries are in a hash-table, for fast lookup:
63175@@ -140,7 +140,7 @@ static void reset_entries(void)
63176 nr_entries = 0;
63177 memset(entries, 0, sizeof(entries));
63178 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63179- atomic_set(&overflow_count, 0);
63180+ atomic_set_unchecked(&overflow_count, 0);
63181 }
63182
63183 static struct entry *alloc_entry(void)
63184@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63185 if (likely(entry))
63186 entry->count++;
63187 else
63188- atomic_inc(&overflow_count);
63189+ atomic_inc_unchecked(&overflow_count);
63190
63191 out_unlock:
63192 spin_unlock_irqrestore(lock, flags);
63193@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63194
63195 static void print_name_offset(struct seq_file *m, unsigned long addr)
63196 {
63197+#ifdef CONFIG_GRKERNSEC_HIDESYM
63198+ seq_printf(m, "<%p>", NULL);
63199+#else
63200 char symname[KSYM_NAME_LEN];
63201
63202 if (lookup_symbol_name(addr, symname) < 0)
63203 seq_printf(m, "<%p>", (void *)addr);
63204 else
63205 seq_printf(m, "%s", symname);
63206+#endif
63207 }
63208
63209 static int tstats_show(struct seq_file *m, void *v)
63210@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63211
63212 seq_puts(m, "Timer Stats Version: v0.2\n");
63213 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63214- if (atomic_read(&overflow_count))
63215+ if (atomic_read_unchecked(&overflow_count))
63216 seq_printf(m, "Overflow: %d entries\n",
63217- atomic_read(&overflow_count));
63218+ atomic_read_unchecked(&overflow_count));
63219
63220 for (i = 0; i < nr_entries; i++) {
63221 entry = entries + i;
63222@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
63223 {
63224 struct proc_dir_entry *pe;
63225
63226+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63227+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63228+#else
63229 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63230+#endif
63231 if (!pe)
63232 return -ENOMEM;
63233 return 0;
63234diff -urNp linux-2.6.32.42/kernel/time.c linux-2.6.32.42/kernel/time.c
63235--- linux-2.6.32.42/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
63236+++ linux-2.6.32.42/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
63237@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
63238 return error;
63239
63240 if (tz) {
63241+ /* we log in do_settimeofday called below, so don't log twice
63242+ */
63243+ if (!tv)
63244+ gr_log_timechange();
63245+
63246 /* SMP safe, global irq locking makes it work. */
63247 sys_tz = *tz;
63248 update_vsyscall_tz();
63249@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
63250 * Avoid unnecessary multiplications/divisions in the
63251 * two most common HZ cases:
63252 */
63253-unsigned int inline jiffies_to_msecs(const unsigned long j)
63254+inline unsigned int jiffies_to_msecs(const unsigned long j)
63255 {
63256 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
63257 return (MSEC_PER_SEC / HZ) * j;
63258@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
63259 }
63260 EXPORT_SYMBOL(jiffies_to_msecs);
63261
63262-unsigned int inline jiffies_to_usecs(const unsigned long j)
63263+inline unsigned int jiffies_to_usecs(const unsigned long j)
63264 {
63265 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
63266 return (USEC_PER_SEC / HZ) * j;
63267diff -urNp linux-2.6.32.42/kernel/timer.c linux-2.6.32.42/kernel/timer.c
63268--- linux-2.6.32.42/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
63269+++ linux-2.6.32.42/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
63270@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
63271 /*
63272 * This function runs timers and the timer-tq in bottom half context.
63273 */
63274-static void run_timer_softirq(struct softirq_action *h)
63275+static void run_timer_softirq(void)
63276 {
63277 struct tvec_base *base = __get_cpu_var(tvec_bases);
63278
63279diff -urNp linux-2.6.32.42/kernel/trace/blktrace.c linux-2.6.32.42/kernel/trace/blktrace.c
63280--- linux-2.6.32.42/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
63281+++ linux-2.6.32.42/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
63282@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
63283 struct blk_trace *bt = filp->private_data;
63284 char buf[16];
63285
63286- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63287+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63288
63289 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63290 }
63291@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
63292 return 1;
63293
63294 bt = buf->chan->private_data;
63295- atomic_inc(&bt->dropped);
63296+ atomic_inc_unchecked(&bt->dropped);
63297 return 0;
63298 }
63299
63300@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
63301
63302 bt->dir = dir;
63303 bt->dev = dev;
63304- atomic_set(&bt->dropped, 0);
63305+ atomic_set_unchecked(&bt->dropped, 0);
63306
63307 ret = -EIO;
63308 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63309diff -urNp linux-2.6.32.42/kernel/trace/ftrace.c linux-2.6.32.42/kernel/trace/ftrace.c
63310--- linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
63311+++ linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
63312@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
63313
63314 ip = rec->ip;
63315
63316+ ret = ftrace_arch_code_modify_prepare();
63317+ FTRACE_WARN_ON(ret);
63318+ if (ret)
63319+ return 0;
63320+
63321 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63322+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63323 if (ret) {
63324 ftrace_bug(ret, ip);
63325 rec->flags |= FTRACE_FL_FAILED;
63326- return 0;
63327 }
63328- return 1;
63329+ return ret ? 0 : 1;
63330 }
63331
63332 /*
63333diff -urNp linux-2.6.32.42/kernel/trace/ring_buffer.c linux-2.6.32.42/kernel/trace/ring_buffer.c
63334--- linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
63335+++ linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
63336@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
63337 * the reader page). But if the next page is a header page,
63338 * its flags will be non zero.
63339 */
63340-static int inline
63341+static inline int
63342 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
63343 struct buffer_page *page, struct list_head *list)
63344 {
63345diff -urNp linux-2.6.32.42/kernel/trace/trace.c linux-2.6.32.42/kernel/trace/trace.c
63346--- linux-2.6.32.42/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
63347+++ linux-2.6.32.42/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
63348@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
63349 size_t rem;
63350 unsigned int i;
63351
63352+ pax_track_stack();
63353+
63354 /* copy the tracer to avoid using a global lock all around */
63355 mutex_lock(&trace_types_lock);
63356 if (unlikely(old_tracer != current_trace && current_trace)) {
63357@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
63358 int entries, size, i;
63359 size_t ret;
63360
63361+ pax_track_stack();
63362+
63363 if (*ppos & (PAGE_SIZE - 1)) {
63364 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
63365 return -EINVAL;
63366@@ -3816,10 +3820,9 @@ static const struct file_operations trac
63367 };
63368 #endif
63369
63370-static struct dentry *d_tracer;
63371-
63372 struct dentry *tracing_init_dentry(void)
63373 {
63374+ static struct dentry *d_tracer;
63375 static int once;
63376
63377 if (d_tracer)
63378@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
63379 return d_tracer;
63380 }
63381
63382-static struct dentry *d_percpu;
63383-
63384 struct dentry *tracing_dentry_percpu(void)
63385 {
63386+ static struct dentry *d_percpu;
63387 static int once;
63388 struct dentry *d_tracer;
63389
63390diff -urNp linux-2.6.32.42/kernel/trace/trace_events.c linux-2.6.32.42/kernel/trace/trace_events.c
63391--- linux-2.6.32.42/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
63392+++ linux-2.6.32.42/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
63393@@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
63394 * Modules must own their file_operations to keep up with
63395 * reference counting.
63396 */
63397+
63398+/* cannot be const */
63399 struct ftrace_module_file_ops {
63400 struct list_head list;
63401 struct module *mod;
63402diff -urNp linux-2.6.32.42/kernel/trace/trace_mmiotrace.c linux-2.6.32.42/kernel/trace/trace_mmiotrace.c
63403--- linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
63404+++ linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
63405@@ -23,7 +23,7 @@ struct header_iter {
63406 static struct trace_array *mmio_trace_array;
63407 static bool overrun_detected;
63408 static unsigned long prev_overruns;
63409-static atomic_t dropped_count;
63410+static atomic_unchecked_t dropped_count;
63411
63412 static void mmio_reset_data(struct trace_array *tr)
63413 {
63414@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
63415
63416 static unsigned long count_overruns(struct trace_iterator *iter)
63417 {
63418- unsigned long cnt = atomic_xchg(&dropped_count, 0);
63419+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63420 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63421
63422 if (over > prev_overruns)
63423@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
63424 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63425 sizeof(*entry), 0, pc);
63426 if (!event) {
63427- atomic_inc(&dropped_count);
63428+ atomic_inc_unchecked(&dropped_count);
63429 return;
63430 }
63431 entry = ring_buffer_event_data(event);
63432@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
63433 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63434 sizeof(*entry), 0, pc);
63435 if (!event) {
63436- atomic_inc(&dropped_count);
63437+ atomic_inc_unchecked(&dropped_count);
63438 return;
63439 }
63440 entry = ring_buffer_event_data(event);
63441diff -urNp linux-2.6.32.42/kernel/trace/trace_output.c linux-2.6.32.42/kernel/trace/trace_output.c
63442--- linux-2.6.32.42/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
63443+++ linux-2.6.32.42/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
63444@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
63445 return 0;
63446 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63447 if (!IS_ERR(p)) {
63448- p = mangle_path(s->buffer + s->len, p, "\n");
63449+ p = mangle_path(s->buffer + s->len, p, "\n\\");
63450 if (p) {
63451 s->len = p - s->buffer;
63452 return 1;
63453diff -urNp linux-2.6.32.42/kernel/trace/trace_stack.c linux-2.6.32.42/kernel/trace/trace_stack.c
63454--- linux-2.6.32.42/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
63455+++ linux-2.6.32.42/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
63456@@ -50,7 +50,7 @@ static inline void check_stack(void)
63457 return;
63458
63459 /* we do not handle interrupt stacks yet */
63460- if (!object_is_on_stack(&this_size))
63461+ if (!object_starts_on_stack(&this_size))
63462 return;
63463
63464 local_irq_save(flags);
63465diff -urNp linux-2.6.32.42/kernel/trace/trace_workqueue.c linux-2.6.32.42/kernel/trace/trace_workqueue.c
63466--- linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
63467+++ linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
63468@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
63469 int cpu;
63470 pid_t pid;
63471 /* Can be inserted from interrupt or user context, need to be atomic */
63472- atomic_t inserted;
63473+ atomic_unchecked_t inserted;
63474 /*
63475 * Don't need to be atomic, works are serialized in a single workqueue thread
63476 * on a single CPU.
63477@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
63478 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63479 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63480 if (node->pid == wq_thread->pid) {
63481- atomic_inc(&node->inserted);
63482+ atomic_inc_unchecked(&node->inserted);
63483 goto found;
63484 }
63485 }
63486@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
63487 tsk = get_pid_task(pid, PIDTYPE_PID);
63488 if (tsk) {
63489 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63490- atomic_read(&cws->inserted), cws->executed,
63491+ atomic_read_unchecked(&cws->inserted), cws->executed,
63492 tsk->comm);
63493 put_task_struct(tsk);
63494 }
63495diff -urNp linux-2.6.32.42/kernel/user.c linux-2.6.32.42/kernel/user.c
63496--- linux-2.6.32.42/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
63497+++ linux-2.6.32.42/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
63498@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
63499 spin_lock_irq(&uidhash_lock);
63500 up = uid_hash_find(uid, hashent);
63501 if (up) {
63502+ put_user_ns(ns);
63503 key_put(new->uid_keyring);
63504 key_put(new->session_keyring);
63505 kmem_cache_free(uid_cachep, new);
63506diff -urNp linux-2.6.32.42/lib/bug.c linux-2.6.32.42/lib/bug.c
63507--- linux-2.6.32.42/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
63508+++ linux-2.6.32.42/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
63509@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
63510 return BUG_TRAP_TYPE_NONE;
63511
63512 bug = find_bug(bugaddr);
63513+ if (!bug)
63514+ return BUG_TRAP_TYPE_NONE;
63515
63516 printk(KERN_EMERG "------------[ cut here ]------------\n");
63517
63518diff -urNp linux-2.6.32.42/lib/debugobjects.c linux-2.6.32.42/lib/debugobjects.c
63519--- linux-2.6.32.42/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
63520+++ linux-2.6.32.42/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
63521@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
63522 if (limit > 4)
63523 return;
63524
63525- is_on_stack = object_is_on_stack(addr);
63526+ is_on_stack = object_starts_on_stack(addr);
63527 if (is_on_stack == onstack)
63528 return;
63529
63530diff -urNp linux-2.6.32.42/lib/dma-debug.c linux-2.6.32.42/lib/dma-debug.c
63531--- linux-2.6.32.42/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
63532+++ linux-2.6.32.42/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
63533@@ -861,7 +861,7 @@ out:
63534
63535 static void check_for_stack(struct device *dev, void *addr)
63536 {
63537- if (object_is_on_stack(addr))
63538+ if (object_starts_on_stack(addr))
63539 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63540 "stack [addr=%p]\n", addr);
63541 }
63542diff -urNp linux-2.6.32.42/lib/idr.c linux-2.6.32.42/lib/idr.c
63543--- linux-2.6.32.42/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
63544+++ linux-2.6.32.42/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
63545@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
63546 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
63547
63548 /* if already at the top layer, we need to grow */
63549- if (id >= 1 << (idp->layers * IDR_BITS)) {
63550+ if (id >= (1 << (idp->layers * IDR_BITS))) {
63551 *starting_id = id;
63552 return IDR_NEED_TO_GROW;
63553 }
63554diff -urNp linux-2.6.32.42/lib/inflate.c linux-2.6.32.42/lib/inflate.c
63555--- linux-2.6.32.42/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
63556+++ linux-2.6.32.42/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
63557@@ -266,7 +266,7 @@ static void free(void *where)
63558 malloc_ptr = free_mem_ptr;
63559 }
63560 #else
63561-#define malloc(a) kmalloc(a, GFP_KERNEL)
63562+#define malloc(a) kmalloc((a), GFP_KERNEL)
63563 #define free(a) kfree(a)
63564 #endif
63565
63566diff -urNp linux-2.6.32.42/lib/Kconfig.debug linux-2.6.32.42/lib/Kconfig.debug
63567--- linux-2.6.32.42/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
63568+++ linux-2.6.32.42/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
63569@@ -905,7 +905,7 @@ config LATENCYTOP
63570 select STACKTRACE
63571 select SCHEDSTATS
63572 select SCHED_DEBUG
63573- depends on HAVE_LATENCYTOP_SUPPORT
63574+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
63575 help
63576 Enable this option if you want to use the LatencyTOP tool
63577 to find out which userspace is blocking on what kernel operations.
63578diff -urNp linux-2.6.32.42/lib/kobject.c linux-2.6.32.42/lib/kobject.c
63579--- linux-2.6.32.42/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
63580+++ linux-2.6.32.42/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
63581@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
63582 return ret;
63583 }
63584
63585-struct sysfs_ops kobj_sysfs_ops = {
63586+const struct sysfs_ops kobj_sysfs_ops = {
63587 .show = kobj_attr_show,
63588 .store = kobj_attr_store,
63589 };
63590@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
63591 * If the kset was not able to be created, NULL will be returned.
63592 */
63593 static struct kset *kset_create(const char *name,
63594- struct kset_uevent_ops *uevent_ops,
63595+ const struct kset_uevent_ops *uevent_ops,
63596 struct kobject *parent_kobj)
63597 {
63598 struct kset *kset;
63599@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
63600 * If the kset was not able to be created, NULL will be returned.
63601 */
63602 struct kset *kset_create_and_add(const char *name,
63603- struct kset_uevent_ops *uevent_ops,
63604+ const struct kset_uevent_ops *uevent_ops,
63605 struct kobject *parent_kobj)
63606 {
63607 struct kset *kset;
63608diff -urNp linux-2.6.32.42/lib/kobject_uevent.c linux-2.6.32.42/lib/kobject_uevent.c
63609--- linux-2.6.32.42/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
63610+++ linux-2.6.32.42/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
63611@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
63612 const char *subsystem;
63613 struct kobject *top_kobj;
63614 struct kset *kset;
63615- struct kset_uevent_ops *uevent_ops;
63616+ const struct kset_uevent_ops *uevent_ops;
63617 u64 seq;
63618 int i = 0;
63619 int retval = 0;
63620diff -urNp linux-2.6.32.42/lib/kref.c linux-2.6.32.42/lib/kref.c
63621--- linux-2.6.32.42/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
63622+++ linux-2.6.32.42/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
63623@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
63624 */
63625 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63626 {
63627- WARN_ON(release == NULL);
63628+ BUG_ON(release == NULL);
63629 WARN_ON(release == (void (*)(struct kref *))kfree);
63630
63631 if (atomic_dec_and_test(&kref->refcount)) {
63632diff -urNp linux-2.6.32.42/lib/parser.c linux-2.6.32.42/lib/parser.c
63633--- linux-2.6.32.42/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
63634+++ linux-2.6.32.42/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
63635@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
63636 char *buf;
63637 int ret;
63638
63639- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
63640+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
63641 if (!buf)
63642 return -ENOMEM;
63643 memcpy(buf, s->from, s->to - s->from);
63644diff -urNp linux-2.6.32.42/lib/radix-tree.c linux-2.6.32.42/lib/radix-tree.c
63645--- linux-2.6.32.42/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
63646+++ linux-2.6.32.42/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
63647@@ -81,7 +81,7 @@ struct radix_tree_preload {
63648 int nr;
63649 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63650 };
63651-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63652+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63653
63654 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
63655 {
63656diff -urNp linux-2.6.32.42/lib/random32.c linux-2.6.32.42/lib/random32.c
63657--- linux-2.6.32.42/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
63658+++ linux-2.6.32.42/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
63659@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
63660 */
63661 static inline u32 __seed(u32 x, u32 m)
63662 {
63663- return (x < m) ? x + m : x;
63664+ return (x <= m) ? x + m + 1 : x;
63665 }
63666
63667 /**
63668diff -urNp linux-2.6.32.42/lib/vsprintf.c linux-2.6.32.42/lib/vsprintf.c
63669--- linux-2.6.32.42/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
63670+++ linux-2.6.32.42/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
63671@@ -16,6 +16,9 @@
63672 * - scnprintf and vscnprintf
63673 */
63674
63675+#ifdef CONFIG_GRKERNSEC_HIDESYM
63676+#define __INCLUDED_BY_HIDESYM 1
63677+#endif
63678 #include <stdarg.h>
63679 #include <linux/module.h>
63680 #include <linux/types.h>
63681@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
63682 return buf;
63683 }
63684
63685-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
63686+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
63687 {
63688 int len, i;
63689
63690 if ((unsigned long)s < PAGE_SIZE)
63691- s = "<NULL>";
63692+ s = "(null)";
63693
63694 len = strnlen(s, spec.precision);
63695
63696@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63697 unsigned long value = (unsigned long) ptr;
63698 #ifdef CONFIG_KALLSYMS
63699 char sym[KSYM_SYMBOL_LEN];
63700- if (ext != 'f' && ext != 's')
63701+ if (ext != 'f' && ext != 's' && ext != 'a')
63702 sprint_symbol(sym, value);
63703 else
63704 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63705@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63706 * - 'f' For simple symbolic function names without offset
63707 * - 'S' For symbolic direct pointers with offset
63708 * - 's' For symbolic direct pointers without offset
63709+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63710+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63711 * - 'R' For a struct resource pointer, it prints the range of
63712 * addresses (not the name nor the flags)
63713 * - 'M' For a 6-byte MAC address, it prints the address in the
63714@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63715 struct printf_spec spec)
63716 {
63717 if (!ptr)
63718- return string(buf, end, "(null)", spec);
63719+ return string(buf, end, "(nil)", spec);
63720
63721 switch (*fmt) {
63722 case 'F':
63723@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63724 case 's':
63725 /* Fallthrough */
63726 case 'S':
63727+#ifdef CONFIG_GRKERNSEC_HIDESYM
63728+ break;
63729+#else
63730+ return symbol_string(buf, end, ptr, spec, *fmt);
63731+#endif
63732+ case 'a':
63733+ /* Fallthrough */
63734+ case 'A':
63735 return symbol_string(buf, end, ptr, spec, *fmt);
63736 case 'R':
63737 return resource_string(buf, end, ptr, spec);
63738@@ -1445,7 +1458,7 @@ do { \
63739 size_t len;
63740 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63741 || (unsigned long)save_str < PAGE_SIZE)
63742- save_str = "<NULL>";
63743+ save_str = "(null)";
63744 len = strlen(save_str);
63745 if (str + len + 1 < end)
63746 memcpy(str, save_str, len + 1);
63747@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63748 typeof(type) value; \
63749 if (sizeof(type) == 8) { \
63750 args = PTR_ALIGN(args, sizeof(u32)); \
63751- *(u32 *)&value = *(u32 *)args; \
63752- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63753+ *(u32 *)&value = *(const u32 *)args; \
63754+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63755 } else { \
63756 args = PTR_ALIGN(args, sizeof(type)); \
63757- value = *(typeof(type) *)args; \
63758+ value = *(const typeof(type) *)args; \
63759 } \
63760 args += sizeof(type); \
63761 value; \
63762@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63763 const char *str_arg = args;
63764 size_t len = strlen(str_arg);
63765 args += len + 1;
63766- str = string(str, end, (char *)str_arg, spec);
63767+ str = string(str, end, str_arg, spec);
63768 break;
63769 }
63770
63771diff -urNp linux-2.6.32.42/localversion-grsec linux-2.6.32.42/localversion-grsec
63772--- linux-2.6.32.42/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63773+++ linux-2.6.32.42/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63774@@ -0,0 +1 @@
63775+-grsec
63776diff -urNp linux-2.6.32.42/Makefile linux-2.6.32.42/Makefile
63777--- linux-2.6.32.42/Makefile 2011-06-25 12:55:34.000000000 -0400
63778+++ linux-2.6.32.42/Makefile 2011-07-06 19:53:33.000000000 -0400
63779@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63780
63781 HOSTCC = gcc
63782 HOSTCXX = g++
63783-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63784-HOSTCXXFLAGS = -O2
63785+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63786+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63787+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63788
63789 # Decide whether to build built-in, modular, or both.
63790 # Normally, just do built-in.
63791@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63792 KBUILD_CPPFLAGS := -D__KERNEL__
63793
63794 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63795+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
63796 -fno-strict-aliasing -fno-common \
63797 -Werror-implicit-function-declaration \
63798 -Wno-format-security \
63799 -fno-delete-null-pointer-checks
63800+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63801 KBUILD_AFLAGS := -D__ASSEMBLY__
63802
63803 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63804@@ -372,12 +375,25 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD
63805 RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o
63806 export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git
63807
63808+ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63809+KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63810+endif
63811+PHONY += pax_plugin
63812+pax-plugin:
63813+ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63814+ $(Q)$(MAKE) $(build)=tools/gcc
63815+else
63816+ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63817+ $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63818+endif
63819+endif
63820+
63821 # ===========================================================================
63822 # Rules shared between *config targets and build targets
63823
63824 # Basic helpers built in scripts/
63825 PHONY += scripts_basic
63826-scripts_basic:
63827+scripts_basic: pax-plugin
63828 $(Q)$(MAKE) $(build)=scripts/basic
63829
63830 # To avoid any implicit rule to kick in, define an empty command.
63831@@ -403,7 +419,7 @@ endif
63832 # of make so .config is not included in this case either (for *config).
63833
63834 no-dot-config-targets := clean mrproper distclean \
63835- cscope TAGS tags help %docs check% \
63836+ cscope gtags TAGS tags help %docs check% \
63837 include/linux/version.h headers_% \
63838 kernelrelease kernelversion
63839
63840@@ -644,7 +660,7 @@ export mod_strip_cmd
63841
63842
63843 ifeq ($(KBUILD_EXTMOD),)
63844-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63845+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63846
63847 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63848 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63849@@ -970,7 +986,7 @@ ifneq ($(KBUILD_SRC),)
63850 endif
63851
63852 # prepare2 creates a makefile if using a separate output directory
63853-prepare2: prepare3 outputmakefile
63854+prepare2: prepare3 outputmakefile pax-plugin
63855
63856 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63857 include/asm include/config/auto.conf
63858@@ -1198,7 +1214,7 @@ MRPROPER_FILES += .config .config.old in
63859 include/linux/autoconf.h include/linux/version.h \
63860 include/linux/utsrelease.h \
63861 include/linux/bounds.h include/asm*/asm-offsets.h \
63862- Module.symvers Module.markers tags TAGS cscope*
63863+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63864
63865 # clean - Delete most, but leave enough to build external modules
63866 #
63867@@ -1289,6 +1305,7 @@ help:
63868 @echo ' modules_prepare - Set up for building external modules'
63869 @echo ' tags/TAGS - Generate tags file for editors'
63870 @echo ' cscope - Generate cscope index'
63871+ @echo ' gtags - Generate GNU GLOBAL index'
63872 @echo ' kernelrelease - Output the release version string'
63873 @echo ' kernelversion - Output the version stored in Makefile'
63874 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63875@@ -1421,7 +1438,7 @@ clean: $(clean-dirs)
63876 $(call cmd,rmdirs)
63877 $(call cmd,rmfiles)
63878 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
63879- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
63880+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
63881 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
63882 -o -name '*.gcno' \) -type f -print | xargs rm -f
63883
63884@@ -1445,7 +1462,7 @@ endif # KBUILD_EXTMOD
63885 quiet_cmd_tags = GEN $@
63886 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63887
63888-tags TAGS cscope: FORCE
63889+tags TAGS cscope gtags: FORCE
63890 $(call cmd,tags)
63891
63892 # Scripts to check various things for consistency
63893diff -urNp linux-2.6.32.42/mm/backing-dev.c linux-2.6.32.42/mm/backing-dev.c
63894--- linux-2.6.32.42/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63895+++ linux-2.6.32.42/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63896@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63897 * Add the default flusher task that gets created for any bdi
63898 * that has dirty data pending writeout
63899 */
63900-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63901+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63902 {
63903 if (!bdi_cap_writeback_dirty(bdi))
63904 return;
63905diff -urNp linux-2.6.32.42/mm/filemap.c linux-2.6.32.42/mm/filemap.c
63906--- linux-2.6.32.42/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63907+++ linux-2.6.32.42/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63908@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63909 struct address_space *mapping = file->f_mapping;
63910
63911 if (!mapping->a_ops->readpage)
63912- return -ENOEXEC;
63913+ return -ENODEV;
63914 file_accessed(file);
63915 vma->vm_ops = &generic_file_vm_ops;
63916 vma->vm_flags |= VM_CAN_NONLINEAR;
63917@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63918 *pos = i_size_read(inode);
63919
63920 if (limit != RLIM_INFINITY) {
63921+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63922 if (*pos >= limit) {
63923 send_sig(SIGXFSZ, current, 0);
63924 return -EFBIG;
63925diff -urNp linux-2.6.32.42/mm/fremap.c linux-2.6.32.42/mm/fremap.c
63926--- linux-2.6.32.42/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63927+++ linux-2.6.32.42/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63928@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63929 retry:
63930 vma = find_vma(mm, start);
63931
63932+#ifdef CONFIG_PAX_SEGMEXEC
63933+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63934+ goto out;
63935+#endif
63936+
63937 /*
63938 * Make sure the vma is shared, that it supports prefaulting,
63939 * and that the remapped range is valid and fully within
63940@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63941 /*
63942 * drop PG_Mlocked flag for over-mapped range
63943 */
63944- unsigned int saved_flags = vma->vm_flags;
63945+ unsigned long saved_flags = vma->vm_flags;
63946 munlock_vma_pages_range(vma, start, start + size);
63947 vma->vm_flags = saved_flags;
63948 }
63949diff -urNp linux-2.6.32.42/mm/highmem.c linux-2.6.32.42/mm/highmem.c
63950--- linux-2.6.32.42/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63951+++ linux-2.6.32.42/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63952@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63953 * So no dangers, even with speculative execution.
63954 */
63955 page = pte_page(pkmap_page_table[i]);
63956+ pax_open_kernel();
63957 pte_clear(&init_mm, (unsigned long)page_address(page),
63958 &pkmap_page_table[i]);
63959-
63960+ pax_close_kernel();
63961 set_page_address(page, NULL);
63962 need_flush = 1;
63963 }
63964@@ -177,9 +178,11 @@ start:
63965 }
63966 }
63967 vaddr = PKMAP_ADDR(last_pkmap_nr);
63968+
63969+ pax_open_kernel();
63970 set_pte_at(&init_mm, vaddr,
63971 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63972-
63973+ pax_close_kernel();
63974 pkmap_count[last_pkmap_nr] = 1;
63975 set_page_address(page, (void *)vaddr);
63976
63977diff -urNp linux-2.6.32.42/mm/hugetlb.c linux-2.6.32.42/mm/hugetlb.c
63978--- linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:55:35.000000000 -0400
63979+++ linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:56:37.000000000 -0400
63980@@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63981 return 1;
63982 }
63983
63984+#ifdef CONFIG_PAX_SEGMEXEC
63985+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63986+{
63987+ struct mm_struct *mm = vma->vm_mm;
63988+ struct vm_area_struct *vma_m;
63989+ unsigned long address_m;
63990+ pte_t *ptep_m;
63991+
63992+ vma_m = pax_find_mirror_vma(vma);
63993+ if (!vma_m)
63994+ return;
63995+
63996+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63997+ address_m = address + SEGMEXEC_TASK_SIZE;
63998+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63999+ get_page(page_m);
64000+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
64001+}
64002+#endif
64003+
64004 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
64005 unsigned long address, pte_t *ptep, pte_t pte,
64006 struct page *pagecache_page)
64007@@ -1996,6 +2016,11 @@ retry_avoidcopy:
64008 huge_ptep_clear_flush(vma, address, ptep);
64009 set_huge_pte_at(mm, address, ptep,
64010 make_huge_pte(vma, new_page, 1));
64011+
64012+#ifdef CONFIG_PAX_SEGMEXEC
64013+ pax_mirror_huge_pte(vma, address, new_page);
64014+#endif
64015+
64016 /* Make the old page be freed below */
64017 new_page = old_page;
64018 }
64019@@ -2127,6 +2152,10 @@ retry:
64020 && (vma->vm_flags & VM_SHARED)));
64021 set_huge_pte_at(mm, address, ptep, new_pte);
64022
64023+#ifdef CONFIG_PAX_SEGMEXEC
64024+ pax_mirror_huge_pte(vma, address, page);
64025+#endif
64026+
64027 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
64028 /* Optimization, do the COW without a second fault */
64029 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
64030@@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
64031 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
64032 struct hstate *h = hstate_vma(vma);
64033
64034+#ifdef CONFIG_PAX_SEGMEXEC
64035+ struct vm_area_struct *vma_m;
64036+
64037+ vma_m = pax_find_mirror_vma(vma);
64038+ if (vma_m) {
64039+ unsigned long address_m;
64040+
64041+ if (vma->vm_start > vma_m->vm_start) {
64042+ address_m = address;
64043+ address -= SEGMEXEC_TASK_SIZE;
64044+ vma = vma_m;
64045+ h = hstate_vma(vma);
64046+ } else
64047+ address_m = address + SEGMEXEC_TASK_SIZE;
64048+
64049+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
64050+ return VM_FAULT_OOM;
64051+ address_m &= HPAGE_MASK;
64052+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
64053+ }
64054+#endif
64055+
64056 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
64057 if (!ptep)
64058 return VM_FAULT_OOM;
64059diff -urNp linux-2.6.32.42/mm/Kconfig linux-2.6.32.42/mm/Kconfig
64060--- linux-2.6.32.42/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
64061+++ linux-2.6.32.42/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
64062@@ -228,7 +228,7 @@ config KSM
64063 config DEFAULT_MMAP_MIN_ADDR
64064 int "Low address space to protect from user allocation"
64065 depends on MMU
64066- default 4096
64067+ default 65536
64068 help
64069 This is the portion of low virtual memory which should be protected
64070 from userspace allocation. Keeping a user from writing to low pages
64071diff -urNp linux-2.6.32.42/mm/kmemleak.c linux-2.6.32.42/mm/kmemleak.c
64072--- linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
64073+++ linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
64074@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
64075
64076 for (i = 0; i < object->trace_len; i++) {
64077 void *ptr = (void *)object->trace[i];
64078- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
64079+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
64080 }
64081 }
64082
64083diff -urNp linux-2.6.32.42/mm/ksm.c linux-2.6.32.42/mm/ksm.c
64084--- linux-2.6.32.42/mm/ksm.c 2011-03-27 14:31:47.000000000 -0400
64085+++ linux-2.6.32.42/mm/ksm.c 2011-06-20 19:38:36.000000000 -0400
64086@@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_r
64087 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
64088 ksm_scan.mm_slot = slot;
64089 spin_unlock(&ksm_mmlist_lock);
64090+ /*
64091+ * Although we tested list_empty() above, a racing __ksm_exit
64092+ * of the last mm on the list may have removed it since then.
64093+ */
64094+ if (slot == &ksm_mm_head)
64095+ return NULL;
64096 next_mm:
64097 ksm_scan.address = 0;
64098 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
64099diff -urNp linux-2.6.32.42/mm/maccess.c linux-2.6.32.42/mm/maccess.c
64100--- linux-2.6.32.42/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
64101+++ linux-2.6.32.42/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
64102@@ -14,7 +14,7 @@
64103 * Safely read from address @src to the buffer at @dst. If a kernel fault
64104 * happens, handle that and return -EFAULT.
64105 */
64106-long probe_kernel_read(void *dst, void *src, size_t size)
64107+long probe_kernel_read(void *dst, const void *src, size_t size)
64108 {
64109 long ret;
64110 mm_segment_t old_fs = get_fs();
64111@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
64112 * Safely write to address @dst from the buffer at @src. If a kernel fault
64113 * happens, handle that and return -EFAULT.
64114 */
64115-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
64116+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
64117 {
64118 long ret;
64119 mm_segment_t old_fs = get_fs();
64120diff -urNp linux-2.6.32.42/mm/madvise.c linux-2.6.32.42/mm/madvise.c
64121--- linux-2.6.32.42/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
64122+++ linux-2.6.32.42/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
64123@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
64124 pgoff_t pgoff;
64125 unsigned long new_flags = vma->vm_flags;
64126
64127+#ifdef CONFIG_PAX_SEGMEXEC
64128+ struct vm_area_struct *vma_m;
64129+#endif
64130+
64131 switch (behavior) {
64132 case MADV_NORMAL:
64133 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
64134@@ -103,6 +107,13 @@ success:
64135 /*
64136 * vm_flags is protected by the mmap_sem held in write mode.
64137 */
64138+
64139+#ifdef CONFIG_PAX_SEGMEXEC
64140+ vma_m = pax_find_mirror_vma(vma);
64141+ if (vma_m)
64142+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
64143+#endif
64144+
64145 vma->vm_flags = new_flags;
64146
64147 out:
64148@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
64149 struct vm_area_struct ** prev,
64150 unsigned long start, unsigned long end)
64151 {
64152+
64153+#ifdef CONFIG_PAX_SEGMEXEC
64154+ struct vm_area_struct *vma_m;
64155+#endif
64156+
64157 *prev = vma;
64158 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
64159 return -EINVAL;
64160@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
64161 zap_page_range(vma, start, end - start, &details);
64162 } else
64163 zap_page_range(vma, start, end - start, NULL);
64164+
64165+#ifdef CONFIG_PAX_SEGMEXEC
64166+ vma_m = pax_find_mirror_vma(vma);
64167+ if (vma_m) {
64168+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
64169+ struct zap_details details = {
64170+ .nonlinear_vma = vma_m,
64171+ .last_index = ULONG_MAX,
64172+ };
64173+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
64174+ } else
64175+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
64176+ }
64177+#endif
64178+
64179 return 0;
64180 }
64181
64182@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
64183 if (end < start)
64184 goto out;
64185
64186+#ifdef CONFIG_PAX_SEGMEXEC
64187+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64188+ if (end > SEGMEXEC_TASK_SIZE)
64189+ goto out;
64190+ } else
64191+#endif
64192+
64193+ if (end > TASK_SIZE)
64194+ goto out;
64195+
64196 error = 0;
64197 if (end == start)
64198 goto out;
64199diff -urNp linux-2.6.32.42/mm/memory.c linux-2.6.32.42/mm/memory.c
64200--- linux-2.6.32.42/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
64201+++ linux-2.6.32.42/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
64202@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
64203 return;
64204
64205 pmd = pmd_offset(pud, start);
64206+
64207+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64208 pud_clear(pud);
64209 pmd_free_tlb(tlb, pmd, start);
64210+#endif
64211+
64212 }
64213
64214 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64215@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
64216 if (end - 1 > ceiling - 1)
64217 return;
64218
64219+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64220 pud = pud_offset(pgd, start);
64221 pgd_clear(pgd);
64222 pud_free_tlb(tlb, pud, start);
64223+#endif
64224+
64225 }
64226
64227 /*
64228@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
64229 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64230 i = 0;
64231
64232- do {
64233+ while (nr_pages) {
64234 struct vm_area_struct *vma;
64235
64236- vma = find_extend_vma(mm, start);
64237+ vma = find_vma(mm, start);
64238 if (!vma && in_gate_area(tsk, start)) {
64239 unsigned long pg = start & PAGE_MASK;
64240 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
64241@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
64242 continue;
64243 }
64244
64245- if (!vma ||
64246+ if (!vma || start < vma->vm_start ||
64247 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64248 !(vm_flags & vma->vm_flags))
64249 return i ? : -EFAULT;
64250@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
64251 start += PAGE_SIZE;
64252 nr_pages--;
64253 } while (nr_pages && start < vma->vm_end);
64254- } while (nr_pages);
64255+ }
64256 return i;
64257 }
64258
64259@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
64260 page_add_file_rmap(page);
64261 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64262
64263+#ifdef CONFIG_PAX_SEGMEXEC
64264+ pax_mirror_file_pte(vma, addr, page, ptl);
64265+#endif
64266+
64267 retval = 0;
64268 pte_unmap_unlock(pte, ptl);
64269 return retval;
64270@@ -1560,10 +1571,22 @@ out:
64271 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64272 struct page *page)
64273 {
64274+
64275+#ifdef CONFIG_PAX_SEGMEXEC
64276+ struct vm_area_struct *vma_m;
64277+#endif
64278+
64279 if (addr < vma->vm_start || addr >= vma->vm_end)
64280 return -EFAULT;
64281 if (!page_count(page))
64282 return -EINVAL;
64283+
64284+#ifdef CONFIG_PAX_SEGMEXEC
64285+ vma_m = pax_find_mirror_vma(vma);
64286+ if (vma_m)
64287+ vma_m->vm_flags |= VM_INSERTPAGE;
64288+#endif
64289+
64290 vma->vm_flags |= VM_INSERTPAGE;
64291 return insert_page(vma, addr, page, vma->vm_page_prot);
64292 }
64293@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
64294 unsigned long pfn)
64295 {
64296 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64297+ BUG_ON(vma->vm_mirror);
64298
64299 if (addr < vma->vm_start || addr >= vma->vm_end)
64300 return -EFAULT;
64301@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
64302 copy_user_highpage(dst, src, va, vma);
64303 }
64304
64305+#ifdef CONFIG_PAX_SEGMEXEC
64306+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64307+{
64308+ struct mm_struct *mm = vma->vm_mm;
64309+ spinlock_t *ptl;
64310+ pte_t *pte, entry;
64311+
64312+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64313+ entry = *pte;
64314+ if (!pte_present(entry)) {
64315+ if (!pte_none(entry)) {
64316+ BUG_ON(pte_file(entry));
64317+ free_swap_and_cache(pte_to_swp_entry(entry));
64318+ pte_clear_not_present_full(mm, address, pte, 0);
64319+ }
64320+ } else {
64321+ struct page *page;
64322+
64323+ flush_cache_page(vma, address, pte_pfn(entry));
64324+ entry = ptep_clear_flush(vma, address, pte);
64325+ BUG_ON(pte_dirty(entry));
64326+ page = vm_normal_page(vma, address, entry);
64327+ if (page) {
64328+ update_hiwater_rss(mm);
64329+ if (PageAnon(page))
64330+ dec_mm_counter(mm, anon_rss);
64331+ else
64332+ dec_mm_counter(mm, file_rss);
64333+ page_remove_rmap(page);
64334+ page_cache_release(page);
64335+ }
64336+ }
64337+ pte_unmap_unlock(pte, ptl);
64338+}
64339+
64340+/* PaX: if vma is mirrored, synchronize the mirror's PTE
64341+ *
64342+ * the ptl of the lower mapped page is held on entry and is not released on exit
64343+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64344+ */
64345+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64346+{
64347+ struct mm_struct *mm = vma->vm_mm;
64348+ unsigned long address_m;
64349+ spinlock_t *ptl_m;
64350+ struct vm_area_struct *vma_m;
64351+ pmd_t *pmd_m;
64352+ pte_t *pte_m, entry_m;
64353+
64354+ BUG_ON(!page_m || !PageAnon(page_m));
64355+
64356+ vma_m = pax_find_mirror_vma(vma);
64357+ if (!vma_m)
64358+ return;
64359+
64360+ BUG_ON(!PageLocked(page_m));
64361+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64362+ address_m = address + SEGMEXEC_TASK_SIZE;
64363+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64364+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64365+ ptl_m = pte_lockptr(mm, pmd_m);
64366+ if (ptl != ptl_m) {
64367+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64368+ if (!pte_none(*pte_m))
64369+ goto out;
64370+ }
64371+
64372+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64373+ page_cache_get(page_m);
64374+ page_add_anon_rmap(page_m, vma_m, address_m);
64375+ inc_mm_counter(mm, anon_rss);
64376+ set_pte_at(mm, address_m, pte_m, entry_m);
64377+ update_mmu_cache(vma_m, address_m, entry_m);
64378+out:
64379+ if (ptl != ptl_m)
64380+ spin_unlock(ptl_m);
64381+ pte_unmap_nested(pte_m);
64382+ unlock_page(page_m);
64383+}
64384+
64385+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64386+{
64387+ struct mm_struct *mm = vma->vm_mm;
64388+ unsigned long address_m;
64389+ spinlock_t *ptl_m;
64390+ struct vm_area_struct *vma_m;
64391+ pmd_t *pmd_m;
64392+ pte_t *pte_m, entry_m;
64393+
64394+ BUG_ON(!page_m || PageAnon(page_m));
64395+
64396+ vma_m = pax_find_mirror_vma(vma);
64397+ if (!vma_m)
64398+ return;
64399+
64400+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64401+ address_m = address + SEGMEXEC_TASK_SIZE;
64402+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64403+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64404+ ptl_m = pte_lockptr(mm, pmd_m);
64405+ if (ptl != ptl_m) {
64406+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64407+ if (!pte_none(*pte_m))
64408+ goto out;
64409+ }
64410+
64411+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64412+ page_cache_get(page_m);
64413+ page_add_file_rmap(page_m);
64414+ inc_mm_counter(mm, file_rss);
64415+ set_pte_at(mm, address_m, pte_m, entry_m);
64416+ update_mmu_cache(vma_m, address_m, entry_m);
64417+out:
64418+ if (ptl != ptl_m)
64419+ spin_unlock(ptl_m);
64420+ pte_unmap_nested(pte_m);
64421+}
64422+
64423+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64424+{
64425+ struct mm_struct *mm = vma->vm_mm;
64426+ unsigned long address_m;
64427+ spinlock_t *ptl_m;
64428+ struct vm_area_struct *vma_m;
64429+ pmd_t *pmd_m;
64430+ pte_t *pte_m, entry_m;
64431+
64432+ vma_m = pax_find_mirror_vma(vma);
64433+ if (!vma_m)
64434+ return;
64435+
64436+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64437+ address_m = address + SEGMEXEC_TASK_SIZE;
64438+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64439+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64440+ ptl_m = pte_lockptr(mm, pmd_m);
64441+ if (ptl != ptl_m) {
64442+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64443+ if (!pte_none(*pte_m))
64444+ goto out;
64445+ }
64446+
64447+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64448+ set_pte_at(mm, address_m, pte_m, entry_m);
64449+out:
64450+ if (ptl != ptl_m)
64451+ spin_unlock(ptl_m);
64452+ pte_unmap_nested(pte_m);
64453+}
64454+
64455+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64456+{
64457+ struct page *page_m;
64458+ pte_t entry;
64459+
64460+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64461+ goto out;
64462+
64463+ entry = *pte;
64464+ page_m = vm_normal_page(vma, address, entry);
64465+ if (!page_m)
64466+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64467+ else if (PageAnon(page_m)) {
64468+ if (pax_find_mirror_vma(vma)) {
64469+ pte_unmap_unlock(pte, ptl);
64470+ lock_page(page_m);
64471+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64472+ if (pte_same(entry, *pte))
64473+ pax_mirror_anon_pte(vma, address, page_m, ptl);
64474+ else
64475+ unlock_page(page_m);
64476+ }
64477+ } else
64478+ pax_mirror_file_pte(vma, address, page_m, ptl);
64479+
64480+out:
64481+ pte_unmap_unlock(pte, ptl);
64482+}
64483+#endif
64484+
64485 /*
64486 * This routine handles present pages, when users try to write
64487 * to a shared page. It is done by copying the page to a new address
64488@@ -2156,6 +2360,12 @@ gotten:
64489 */
64490 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64491 if (likely(pte_same(*page_table, orig_pte))) {
64492+
64493+#ifdef CONFIG_PAX_SEGMEXEC
64494+ if (pax_find_mirror_vma(vma))
64495+ BUG_ON(!trylock_page(new_page));
64496+#endif
64497+
64498 if (old_page) {
64499 if (!PageAnon(old_page)) {
64500 dec_mm_counter(mm, file_rss);
64501@@ -2207,6 +2417,10 @@ gotten:
64502 page_remove_rmap(old_page);
64503 }
64504
64505+#ifdef CONFIG_PAX_SEGMEXEC
64506+ pax_mirror_anon_pte(vma, address, new_page, ptl);
64507+#endif
64508+
64509 /* Free the old page.. */
64510 new_page = old_page;
64511 ret |= VM_FAULT_WRITE;
64512@@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
64513 swap_free(entry);
64514 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64515 try_to_free_swap(page);
64516+
64517+#ifdef CONFIG_PAX_SEGMEXEC
64518+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64519+#endif
64520+
64521 unlock_page(page);
64522
64523 if (flags & FAULT_FLAG_WRITE) {
64524@@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
64525
64526 /* No need to invalidate - it was non-present before */
64527 update_mmu_cache(vma, address, pte);
64528+
64529+#ifdef CONFIG_PAX_SEGMEXEC
64530+ pax_mirror_anon_pte(vma, address, page, ptl);
64531+#endif
64532+
64533 unlock:
64534 pte_unmap_unlock(page_table, ptl);
64535 out:
64536@@ -2630,40 +2854,6 @@ out_release:
64537 }
64538
64539 /*
64540- * This is like a special single-page "expand_{down|up}wards()",
64541- * except we must first make sure that 'address{-|+}PAGE_SIZE'
64542- * doesn't hit another vma.
64543- */
64544-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64545-{
64546- address &= PAGE_MASK;
64547- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64548- struct vm_area_struct *prev = vma->vm_prev;
64549-
64550- /*
64551- * Is there a mapping abutting this one below?
64552- *
64553- * That's only ok if it's the same stack mapping
64554- * that has gotten split..
64555- */
64556- if (prev && prev->vm_end == address)
64557- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64558-
64559- expand_stack(vma, address - PAGE_SIZE);
64560- }
64561- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64562- struct vm_area_struct *next = vma->vm_next;
64563-
64564- /* As VM_GROWSDOWN but s/below/above/ */
64565- if (next && next->vm_start == address + PAGE_SIZE)
64566- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64567-
64568- expand_upwards(vma, address + PAGE_SIZE);
64569- }
64570- return 0;
64571-}
64572-
64573-/*
64574 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64575 * but allow concurrent faults), and pte mapped but not yet locked.
64576 * We return with mmap_sem still held, but pte unmapped and unlocked.
64577@@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
64578 unsigned long address, pte_t *page_table, pmd_t *pmd,
64579 unsigned int flags)
64580 {
64581- struct page *page;
64582+ struct page *page = NULL;
64583 spinlock_t *ptl;
64584 pte_t entry;
64585
64586- pte_unmap(page_table);
64587-
64588- /* Check if we need to add a guard page to the stack */
64589- if (check_stack_guard_page(vma, address) < 0)
64590- return VM_FAULT_SIGBUS;
64591-
64592- /* Use the zero-page for reads */
64593 if (!(flags & FAULT_FLAG_WRITE)) {
64594 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64595 vma->vm_page_prot));
64596- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64597+ ptl = pte_lockptr(mm, pmd);
64598+ spin_lock(ptl);
64599 if (!pte_none(*page_table))
64600 goto unlock;
64601 goto setpte;
64602 }
64603
64604 /* Allocate our own private page. */
64605+ pte_unmap(page_table);
64606+
64607 if (unlikely(anon_vma_prepare(vma)))
64608 goto oom;
64609 page = alloc_zeroed_user_highpage_movable(vma, address);
64610@@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
64611 if (!pte_none(*page_table))
64612 goto release;
64613
64614+#ifdef CONFIG_PAX_SEGMEXEC
64615+ if (pax_find_mirror_vma(vma))
64616+ BUG_ON(!trylock_page(page));
64617+#endif
64618+
64619 inc_mm_counter(mm, anon_rss);
64620 page_add_new_anon_rmap(page, vma, address);
64621 setpte:
64622@@ -2718,6 +2909,12 @@ setpte:
64623
64624 /* No need to invalidate - it was non-present before */
64625 update_mmu_cache(vma, address, entry);
64626+
64627+#ifdef CONFIG_PAX_SEGMEXEC
64628+ if (page)
64629+ pax_mirror_anon_pte(vma, address, page, ptl);
64630+#endif
64631+
64632 unlock:
64633 pte_unmap_unlock(page_table, ptl);
64634 return 0;
64635@@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
64636 */
64637 /* Only go through if we didn't race with anybody else... */
64638 if (likely(pte_same(*page_table, orig_pte))) {
64639+
64640+#ifdef CONFIG_PAX_SEGMEXEC
64641+ if (anon && pax_find_mirror_vma(vma))
64642+ BUG_ON(!trylock_page(page));
64643+#endif
64644+
64645 flush_icache_page(vma, page);
64646 entry = mk_pte(page, vma->vm_page_prot);
64647 if (flags & FAULT_FLAG_WRITE)
64648@@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
64649
64650 /* no need to invalidate: a not-present page won't be cached */
64651 update_mmu_cache(vma, address, entry);
64652+
64653+#ifdef CONFIG_PAX_SEGMEXEC
64654+ if (anon)
64655+ pax_mirror_anon_pte(vma, address, page, ptl);
64656+ else
64657+ pax_mirror_file_pte(vma, address, page, ptl);
64658+#endif
64659+
64660 } else {
64661 if (charged)
64662 mem_cgroup_uncharge_page(page);
64663@@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
64664 if (flags & FAULT_FLAG_WRITE)
64665 flush_tlb_page(vma, address);
64666 }
64667+
64668+#ifdef CONFIG_PAX_SEGMEXEC
64669+ pax_mirror_pte(vma, address, pte, pmd, ptl);
64670+ return 0;
64671+#endif
64672+
64673 unlock:
64674 pte_unmap_unlock(pte, ptl);
64675 return 0;
64676@@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
64677 pmd_t *pmd;
64678 pte_t *pte;
64679
64680+#ifdef CONFIG_PAX_SEGMEXEC
64681+ struct vm_area_struct *vma_m;
64682+#endif
64683+
64684 __set_current_state(TASK_RUNNING);
64685
64686 count_vm_event(PGFAULT);
64687@@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
64688 if (unlikely(is_vm_hugetlb_page(vma)))
64689 return hugetlb_fault(mm, vma, address, flags);
64690
64691+#ifdef CONFIG_PAX_SEGMEXEC
64692+ vma_m = pax_find_mirror_vma(vma);
64693+ if (vma_m) {
64694+ unsigned long address_m;
64695+ pgd_t *pgd_m;
64696+ pud_t *pud_m;
64697+ pmd_t *pmd_m;
64698+
64699+ if (vma->vm_start > vma_m->vm_start) {
64700+ address_m = address;
64701+ address -= SEGMEXEC_TASK_SIZE;
64702+ vma = vma_m;
64703+ } else
64704+ address_m = address + SEGMEXEC_TASK_SIZE;
64705+
64706+ pgd_m = pgd_offset(mm, address_m);
64707+ pud_m = pud_alloc(mm, pgd_m, address_m);
64708+ if (!pud_m)
64709+ return VM_FAULT_OOM;
64710+ pmd_m = pmd_alloc(mm, pud_m, address_m);
64711+ if (!pmd_m)
64712+ return VM_FAULT_OOM;
64713+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64714+ return VM_FAULT_OOM;
64715+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64716+ }
64717+#endif
64718+
64719 pgd = pgd_offset(mm, address);
64720 pud = pud_alloc(mm, pgd, address);
64721 if (!pud)
64722@@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
64723 gate_vma.vm_start = FIXADDR_USER_START;
64724 gate_vma.vm_end = FIXADDR_USER_END;
64725 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64726- gate_vma.vm_page_prot = __P101;
64727+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64728 /*
64729 * Make sure the vDSO gets into every core dump.
64730 * Dumping its contents makes post-mortem fully interpretable later
64731diff -urNp linux-2.6.32.42/mm/memory-failure.c linux-2.6.32.42/mm/memory-failure.c
64732--- linux-2.6.32.42/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64733+++ linux-2.6.32.42/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64734@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64735
64736 int sysctl_memory_failure_recovery __read_mostly = 1;
64737
64738-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64739+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64740
64741 /*
64742 * Send all the processes who have the page mapped an ``action optional''
64743@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64744 return 0;
64745 }
64746
64747- atomic_long_add(1, &mce_bad_pages);
64748+ atomic_long_add_unchecked(1, &mce_bad_pages);
64749
64750 /*
64751 * We need/can do nothing about count=0 pages.
64752diff -urNp linux-2.6.32.42/mm/mempolicy.c linux-2.6.32.42/mm/mempolicy.c
64753--- linux-2.6.32.42/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64754+++ linux-2.6.32.42/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64755@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64756 struct vm_area_struct *next;
64757 int err;
64758
64759+#ifdef CONFIG_PAX_SEGMEXEC
64760+ struct vm_area_struct *vma_m;
64761+#endif
64762+
64763 err = 0;
64764 for (; vma && vma->vm_start < end; vma = next) {
64765 next = vma->vm_next;
64766@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64767 err = policy_vma(vma, new);
64768 if (err)
64769 break;
64770+
64771+#ifdef CONFIG_PAX_SEGMEXEC
64772+ vma_m = pax_find_mirror_vma(vma);
64773+ if (vma_m) {
64774+ err = policy_vma(vma_m, new);
64775+ if (err)
64776+ break;
64777+ }
64778+#endif
64779+
64780 }
64781 return err;
64782 }
64783@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64784
64785 if (end < start)
64786 return -EINVAL;
64787+
64788+#ifdef CONFIG_PAX_SEGMEXEC
64789+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64790+ if (end > SEGMEXEC_TASK_SIZE)
64791+ return -EINVAL;
64792+ } else
64793+#endif
64794+
64795+ if (end > TASK_SIZE)
64796+ return -EINVAL;
64797+
64798 if (end == start)
64799 return 0;
64800
64801@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64802 if (!mm)
64803 return -EINVAL;
64804
64805+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64806+ if (mm != current->mm &&
64807+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64808+ err = -EPERM;
64809+ goto out;
64810+ }
64811+#endif
64812+
64813 /*
64814 * Check if this process has the right to modify the specified
64815 * process. The right exists if the process has administrative
64816@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64817 rcu_read_lock();
64818 tcred = __task_cred(task);
64819 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64820- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64821- !capable(CAP_SYS_NICE)) {
64822+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64823 rcu_read_unlock();
64824 err = -EPERM;
64825 goto out;
64826@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64827
64828 if (file) {
64829 seq_printf(m, " file=");
64830- seq_path(m, &file->f_path, "\n\t= ");
64831+ seq_path(m, &file->f_path, "\n\t\\= ");
64832 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64833 seq_printf(m, " heap");
64834 } else if (vma->vm_start <= mm->start_stack &&
64835diff -urNp linux-2.6.32.42/mm/migrate.c linux-2.6.32.42/mm/migrate.c
64836--- linux-2.6.32.42/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64837+++ linux-2.6.32.42/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64838@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64839 unsigned long chunk_start;
64840 int err;
64841
64842+ pax_track_stack();
64843+
64844 task_nodes = cpuset_mems_allowed(task);
64845
64846 err = -ENOMEM;
64847@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64848 if (!mm)
64849 return -EINVAL;
64850
64851+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64852+ if (mm != current->mm &&
64853+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64854+ err = -EPERM;
64855+ goto out;
64856+ }
64857+#endif
64858+
64859 /*
64860 * Check if this process has the right to modify the specified
64861 * process. The right exists if the process has administrative
64862@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64863 rcu_read_lock();
64864 tcred = __task_cred(task);
64865 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64866- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64867- !capable(CAP_SYS_NICE)) {
64868+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64869 rcu_read_unlock();
64870 err = -EPERM;
64871 goto out;
64872diff -urNp linux-2.6.32.42/mm/mlock.c linux-2.6.32.42/mm/mlock.c
64873--- linux-2.6.32.42/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64874+++ linux-2.6.32.42/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64875@@ -13,6 +13,7 @@
64876 #include <linux/pagemap.h>
64877 #include <linux/mempolicy.h>
64878 #include <linux/syscalls.h>
64879+#include <linux/security.h>
64880 #include <linux/sched.h>
64881 #include <linux/module.h>
64882 #include <linux/rmap.h>
64883@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64884 }
64885 }
64886
64887-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64888-{
64889- return (vma->vm_flags & VM_GROWSDOWN) &&
64890- (vma->vm_start == addr) &&
64891- !vma_stack_continue(vma->vm_prev, addr);
64892-}
64893-
64894 /**
64895 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64896 * @vma: target vma
64897@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64898 if (vma->vm_flags & VM_WRITE)
64899 gup_flags |= FOLL_WRITE;
64900
64901- /* We don't try to access the guard page of a stack vma */
64902- if (stack_guard_page(vma, start)) {
64903- addr += PAGE_SIZE;
64904- nr_pages--;
64905- }
64906-
64907 while (nr_pages > 0) {
64908 int i;
64909
64910@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64911 {
64912 unsigned long nstart, end, tmp;
64913 struct vm_area_struct * vma, * prev;
64914- int error;
64915+ int error = -EINVAL;
64916
64917 len = PAGE_ALIGN(len);
64918 end = start + len;
64919@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64920 return -EINVAL;
64921 if (end == start)
64922 return 0;
64923+ if (end > TASK_SIZE)
64924+ return -EINVAL;
64925+
64926 vma = find_vma_prev(current->mm, start, &prev);
64927 if (!vma || vma->vm_start > start)
64928 return -ENOMEM;
64929@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64930 for (nstart = start ; ; ) {
64931 unsigned int newflags;
64932
64933+#ifdef CONFIG_PAX_SEGMEXEC
64934+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64935+ break;
64936+#endif
64937+
64938 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64939
64940 newflags = vma->vm_flags | VM_LOCKED;
64941@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64942 lock_limit >>= PAGE_SHIFT;
64943
64944 /* check against resource limits */
64945+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64946 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64947 error = do_mlock(start, len, 1);
64948 up_write(&current->mm->mmap_sem);
64949@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64950 static int do_mlockall(int flags)
64951 {
64952 struct vm_area_struct * vma, * prev = NULL;
64953- unsigned int def_flags = 0;
64954
64955 if (flags & MCL_FUTURE)
64956- def_flags = VM_LOCKED;
64957- current->mm->def_flags = def_flags;
64958+ current->mm->def_flags |= VM_LOCKED;
64959+ else
64960+ current->mm->def_flags &= ~VM_LOCKED;
64961 if (flags == MCL_FUTURE)
64962 goto out;
64963
64964 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64965- unsigned int newflags;
64966+ unsigned long newflags;
64967+
64968+#ifdef CONFIG_PAX_SEGMEXEC
64969+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64970+ break;
64971+#endif
64972
64973+ BUG_ON(vma->vm_end > TASK_SIZE);
64974 newflags = vma->vm_flags | VM_LOCKED;
64975 if (!(flags & MCL_CURRENT))
64976 newflags &= ~VM_LOCKED;
64977@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64978 lock_limit >>= PAGE_SHIFT;
64979
64980 ret = -ENOMEM;
64981+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64982 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64983 capable(CAP_IPC_LOCK))
64984 ret = do_mlockall(flags);
64985diff -urNp linux-2.6.32.42/mm/mmap.c linux-2.6.32.42/mm/mmap.c
64986--- linux-2.6.32.42/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64987+++ linux-2.6.32.42/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64988@@ -45,6 +45,16 @@
64989 #define arch_rebalance_pgtables(addr, len) (addr)
64990 #endif
64991
64992+static inline void verify_mm_writelocked(struct mm_struct *mm)
64993+{
64994+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64995+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64996+ up_read(&mm->mmap_sem);
64997+ BUG();
64998+ }
64999+#endif
65000+}
65001+
65002 static void unmap_region(struct mm_struct *mm,
65003 struct vm_area_struct *vma, struct vm_area_struct *prev,
65004 unsigned long start, unsigned long end);
65005@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
65006 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65007 *
65008 */
65009-pgprot_t protection_map[16] = {
65010+pgprot_t protection_map[16] __read_only = {
65011 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
65012 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
65013 };
65014
65015 pgprot_t vm_get_page_prot(unsigned long vm_flags)
65016 {
65017- return __pgprot(pgprot_val(protection_map[vm_flags &
65018+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
65019 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
65020 pgprot_val(arch_vm_get_page_prot(vm_flags)));
65021+
65022+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65023+ if (!nx_enabled &&
65024+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
65025+ (vm_flags & (VM_READ | VM_WRITE)))
65026+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
65027+#endif
65028+
65029+ return prot;
65030 }
65031 EXPORT_SYMBOL(vm_get_page_prot);
65032
65033 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
65034 int sysctl_overcommit_ratio = 50; /* default is 50% */
65035 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65036+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
65037 struct percpu_counter vm_committed_as;
65038
65039 /*
65040@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
65041 struct vm_area_struct *next = vma->vm_next;
65042
65043 might_sleep();
65044+ BUG_ON(vma->vm_mirror);
65045 if (vma->vm_ops && vma->vm_ops->close)
65046 vma->vm_ops->close(vma);
65047 if (vma->vm_file) {
65048@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
65049 * not page aligned -Ram Gupta
65050 */
65051 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
65052+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
65053 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
65054 (mm->end_data - mm->start_data) > rlim)
65055 goto out;
65056@@ -704,6 +726,12 @@ static int
65057 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
65058 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65059 {
65060+
65061+#ifdef CONFIG_PAX_SEGMEXEC
65062+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
65063+ return 0;
65064+#endif
65065+
65066 if (is_mergeable_vma(vma, file, vm_flags) &&
65067 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65068 if (vma->vm_pgoff == vm_pgoff)
65069@@ -723,6 +751,12 @@ static int
65070 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
65071 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65072 {
65073+
65074+#ifdef CONFIG_PAX_SEGMEXEC
65075+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
65076+ return 0;
65077+#endif
65078+
65079 if (is_mergeable_vma(vma, file, vm_flags) &&
65080 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65081 pgoff_t vm_pglen;
65082@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
65083 struct vm_area_struct *vma_merge(struct mm_struct *mm,
65084 struct vm_area_struct *prev, unsigned long addr,
65085 unsigned long end, unsigned long vm_flags,
65086- struct anon_vma *anon_vma, struct file *file,
65087+ struct anon_vma *anon_vma, struct file *file,
65088 pgoff_t pgoff, struct mempolicy *policy)
65089 {
65090 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
65091 struct vm_area_struct *area, *next;
65092
65093+#ifdef CONFIG_PAX_SEGMEXEC
65094+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
65095+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
65096+
65097+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
65098+#endif
65099+
65100 /*
65101 * We later require that vma->vm_flags == vm_flags,
65102 * so this tests vma->vm_flags & VM_SPECIAL, too.
65103@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
65104 if (next && next->vm_end == end) /* cases 6, 7, 8 */
65105 next = next->vm_next;
65106
65107+#ifdef CONFIG_PAX_SEGMEXEC
65108+ if (prev)
65109+ prev_m = pax_find_mirror_vma(prev);
65110+ if (area)
65111+ area_m = pax_find_mirror_vma(area);
65112+ if (next)
65113+ next_m = pax_find_mirror_vma(next);
65114+#endif
65115+
65116 /*
65117 * Can it merge with the predecessor?
65118 */
65119@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
65120 /* cases 1, 6 */
65121 vma_adjust(prev, prev->vm_start,
65122 next->vm_end, prev->vm_pgoff, NULL);
65123- } else /* cases 2, 5, 7 */
65124+
65125+#ifdef CONFIG_PAX_SEGMEXEC
65126+ if (prev_m)
65127+ vma_adjust(prev_m, prev_m->vm_start,
65128+ next_m->vm_end, prev_m->vm_pgoff, NULL);
65129+#endif
65130+
65131+ } else { /* cases 2, 5, 7 */
65132 vma_adjust(prev, prev->vm_start,
65133 end, prev->vm_pgoff, NULL);
65134+
65135+#ifdef CONFIG_PAX_SEGMEXEC
65136+ if (prev_m)
65137+ vma_adjust(prev_m, prev_m->vm_start,
65138+ end_m, prev_m->vm_pgoff, NULL);
65139+#endif
65140+
65141+ }
65142 return prev;
65143 }
65144
65145@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
65146 mpol_equal(policy, vma_policy(next)) &&
65147 can_vma_merge_before(next, vm_flags,
65148 anon_vma, file, pgoff+pglen)) {
65149- if (prev && addr < prev->vm_end) /* case 4 */
65150+ if (prev && addr < prev->vm_end) { /* case 4 */
65151 vma_adjust(prev, prev->vm_start,
65152 addr, prev->vm_pgoff, NULL);
65153- else /* cases 3, 8 */
65154+
65155+#ifdef CONFIG_PAX_SEGMEXEC
65156+ if (prev_m)
65157+ vma_adjust(prev_m, prev_m->vm_start,
65158+ addr_m, prev_m->vm_pgoff, NULL);
65159+#endif
65160+
65161+ } else { /* cases 3, 8 */
65162 vma_adjust(area, addr, next->vm_end,
65163 next->vm_pgoff - pglen, NULL);
65164+
65165+#ifdef CONFIG_PAX_SEGMEXEC
65166+ if (area_m)
65167+ vma_adjust(area_m, addr_m, next_m->vm_end,
65168+ next_m->vm_pgoff - pglen, NULL);
65169+#endif
65170+
65171+ }
65172 return area;
65173 }
65174
65175@@ -898,14 +978,11 @@ none:
65176 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
65177 struct file *file, long pages)
65178 {
65179- const unsigned long stack_flags
65180- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
65181-
65182 if (file) {
65183 mm->shared_vm += pages;
65184 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
65185 mm->exec_vm += pages;
65186- } else if (flags & stack_flags)
65187+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
65188 mm->stack_vm += pages;
65189 if (flags & (VM_RESERVED|VM_IO))
65190 mm->reserved_vm += pages;
65191@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
65192 * (the exception is when the underlying filesystem is noexec
65193 * mounted, in which case we dont add PROT_EXEC.)
65194 */
65195- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65196+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65197 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65198 prot |= PROT_EXEC;
65199
65200@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
65201 /* Obtain the address to map to. we verify (or select) it and ensure
65202 * that it represents a valid section of the address space.
65203 */
65204- addr = get_unmapped_area(file, addr, len, pgoff, flags);
65205+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65206 if (addr & ~PAGE_MASK)
65207 return addr;
65208
65209@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
65210 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65211 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65212
65213+#ifdef CONFIG_PAX_MPROTECT
65214+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65215+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65216+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65217+ gr_log_rwxmmap(file);
65218+
65219+#ifdef CONFIG_PAX_EMUPLT
65220+ vm_flags &= ~VM_EXEC;
65221+#else
65222+ return -EPERM;
65223+#endif
65224+
65225+ }
65226+
65227+ if (!(vm_flags & VM_EXEC))
65228+ vm_flags &= ~VM_MAYEXEC;
65229+#else
65230+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65231+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65232+#endif
65233+ else
65234+ vm_flags &= ~VM_MAYWRITE;
65235+ }
65236+#endif
65237+
65238+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65239+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65240+ vm_flags &= ~VM_PAGEEXEC;
65241+#endif
65242+
65243 if (flags & MAP_LOCKED)
65244 if (!can_do_mlock())
65245 return -EPERM;
65246@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
65247 locked += mm->locked_vm;
65248 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65249 lock_limit >>= PAGE_SHIFT;
65250+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65251 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65252 return -EAGAIN;
65253 }
65254@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
65255 if (error)
65256 return error;
65257
65258+ if (!gr_acl_handle_mmap(file, prot))
65259+ return -EACCES;
65260+
65261 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65262 }
65263 EXPORT_SYMBOL(do_mmap_pgoff);
65264@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
65265 */
65266 int vma_wants_writenotify(struct vm_area_struct *vma)
65267 {
65268- unsigned int vm_flags = vma->vm_flags;
65269+ unsigned long vm_flags = vma->vm_flags;
65270
65271 /* If it was private or non-writable, the write bit is already clear */
65272- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65273+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65274 return 0;
65275
65276 /* The backer wishes to know when pages are first written to? */
65277@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
65278 unsigned long charged = 0;
65279 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65280
65281+#ifdef CONFIG_PAX_SEGMEXEC
65282+ struct vm_area_struct *vma_m = NULL;
65283+#endif
65284+
65285+ /*
65286+ * mm->mmap_sem is required to protect against another thread
65287+ * changing the mappings in case we sleep.
65288+ */
65289+ verify_mm_writelocked(mm);
65290+
65291 /* Clear old maps */
65292 error = -ENOMEM;
65293-munmap_back:
65294 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65295 if (vma && vma->vm_start < addr + len) {
65296 if (do_munmap(mm, addr, len))
65297 return -ENOMEM;
65298- goto munmap_back;
65299+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65300+ BUG_ON(vma && vma->vm_start < addr + len);
65301 }
65302
65303 /* Check against address space limit. */
65304@@ -1173,6 +1294,16 @@ munmap_back:
65305 goto unacct_error;
65306 }
65307
65308+#ifdef CONFIG_PAX_SEGMEXEC
65309+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65310+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65311+ if (!vma_m) {
65312+ error = -ENOMEM;
65313+ goto free_vma;
65314+ }
65315+ }
65316+#endif
65317+
65318 vma->vm_mm = mm;
65319 vma->vm_start = addr;
65320 vma->vm_end = addr + len;
65321@@ -1195,6 +1326,19 @@ munmap_back:
65322 error = file->f_op->mmap(file, vma);
65323 if (error)
65324 goto unmap_and_free_vma;
65325+
65326+#ifdef CONFIG_PAX_SEGMEXEC
65327+ if (vma_m && (vm_flags & VM_EXECUTABLE))
65328+ added_exe_file_vma(mm);
65329+#endif
65330+
65331+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65332+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65333+ vma->vm_flags |= VM_PAGEEXEC;
65334+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65335+ }
65336+#endif
65337+
65338 if (vm_flags & VM_EXECUTABLE)
65339 added_exe_file_vma(mm);
65340
65341@@ -1218,6 +1362,11 @@ munmap_back:
65342 vma_link(mm, vma, prev, rb_link, rb_parent);
65343 file = vma->vm_file;
65344
65345+#ifdef CONFIG_PAX_SEGMEXEC
65346+ if (vma_m)
65347+ pax_mirror_vma(vma_m, vma);
65348+#endif
65349+
65350 /* Once vma denies write, undo our temporary denial count */
65351 if (correct_wcount)
65352 atomic_inc(&inode->i_writecount);
65353@@ -1226,6 +1375,7 @@ out:
65354
65355 mm->total_vm += len >> PAGE_SHIFT;
65356 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65357+ track_exec_limit(mm, addr, addr + len, vm_flags);
65358 if (vm_flags & VM_LOCKED) {
65359 /*
65360 * makes pages present; downgrades, drops, reacquires mmap_sem
65361@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
65362 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65363 charged = 0;
65364 free_vma:
65365+
65366+#ifdef CONFIG_PAX_SEGMEXEC
65367+ if (vma_m)
65368+ kmem_cache_free(vm_area_cachep, vma_m);
65369+#endif
65370+
65371 kmem_cache_free(vm_area_cachep, vma);
65372 unacct_error:
65373 if (charged)
65374@@ -1255,6 +1411,44 @@ unacct_error:
65375 return error;
65376 }
65377
65378+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65379+{
65380+ if (!vma) {
65381+#ifdef CONFIG_STACK_GROWSUP
65382+ if (addr > sysctl_heap_stack_gap)
65383+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65384+ else
65385+ vma = find_vma(current->mm, 0);
65386+ if (vma && (vma->vm_flags & VM_GROWSUP))
65387+ return false;
65388+#endif
65389+ return true;
65390+ }
65391+
65392+ if (addr + len > vma->vm_start)
65393+ return false;
65394+
65395+ if (vma->vm_flags & VM_GROWSDOWN)
65396+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65397+#ifdef CONFIG_STACK_GROWSUP
65398+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65399+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65400+#endif
65401+
65402+ return true;
65403+}
65404+
65405+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65406+{
65407+ if (vma->vm_start < len)
65408+ return -ENOMEM;
65409+ if (!(vma->vm_flags & VM_GROWSDOWN))
65410+ return vma->vm_start - len;
65411+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
65412+ return vma->vm_start - len - sysctl_heap_stack_gap;
65413+ return -ENOMEM;
65414+}
65415+
65416 /* Get an address range which is currently unmapped.
65417 * For shmat() with addr=0.
65418 *
65419@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
65420 if (flags & MAP_FIXED)
65421 return addr;
65422
65423+#ifdef CONFIG_PAX_RANDMMAP
65424+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65425+#endif
65426+
65427 if (addr) {
65428 addr = PAGE_ALIGN(addr);
65429- vma = find_vma(mm, addr);
65430- if (TASK_SIZE - len >= addr &&
65431- (!vma || addr + len <= vma->vm_start))
65432- return addr;
65433+ if (TASK_SIZE - len >= addr) {
65434+ vma = find_vma(mm, addr);
65435+ if (check_heap_stack_gap(vma, addr, len))
65436+ return addr;
65437+ }
65438 }
65439 if (len > mm->cached_hole_size) {
65440- start_addr = addr = mm->free_area_cache;
65441+ start_addr = addr = mm->free_area_cache;
65442 } else {
65443- start_addr = addr = TASK_UNMAPPED_BASE;
65444- mm->cached_hole_size = 0;
65445+ start_addr = addr = mm->mmap_base;
65446+ mm->cached_hole_size = 0;
65447 }
65448
65449 full_search:
65450@@ -1303,34 +1502,40 @@ full_search:
65451 * Start a new search - just in case we missed
65452 * some holes.
65453 */
65454- if (start_addr != TASK_UNMAPPED_BASE) {
65455- addr = TASK_UNMAPPED_BASE;
65456- start_addr = addr;
65457+ if (start_addr != mm->mmap_base) {
65458+ start_addr = addr = mm->mmap_base;
65459 mm->cached_hole_size = 0;
65460 goto full_search;
65461 }
65462 return -ENOMEM;
65463 }
65464- if (!vma || addr + len <= vma->vm_start) {
65465- /*
65466- * Remember the place where we stopped the search:
65467- */
65468- mm->free_area_cache = addr + len;
65469- return addr;
65470- }
65471+ if (check_heap_stack_gap(vma, addr, len))
65472+ break;
65473 if (addr + mm->cached_hole_size < vma->vm_start)
65474 mm->cached_hole_size = vma->vm_start - addr;
65475 addr = vma->vm_end;
65476 }
65477+
65478+ /*
65479+ * Remember the place where we stopped the search:
65480+ */
65481+ mm->free_area_cache = addr + len;
65482+ return addr;
65483 }
65484 #endif
65485
65486 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65487 {
65488+
65489+#ifdef CONFIG_PAX_SEGMEXEC
65490+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65491+ return;
65492+#endif
65493+
65494 /*
65495 * Is this a new hole at the lowest possible address?
65496 */
65497- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65498+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65499 mm->free_area_cache = addr;
65500 mm->cached_hole_size = ~0UL;
65501 }
65502@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
65503 {
65504 struct vm_area_struct *vma;
65505 struct mm_struct *mm = current->mm;
65506- unsigned long addr = addr0;
65507+ unsigned long base = mm->mmap_base, addr = addr0;
65508
65509 /* requested length too big for entire address space */
65510 if (len > TASK_SIZE)
65511@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
65512 if (flags & MAP_FIXED)
65513 return addr;
65514
65515+#ifdef CONFIG_PAX_RANDMMAP
65516+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65517+#endif
65518+
65519 /* requesting a specific address */
65520 if (addr) {
65521 addr = PAGE_ALIGN(addr);
65522- vma = find_vma(mm, addr);
65523- if (TASK_SIZE - len >= addr &&
65524- (!vma || addr + len <= vma->vm_start))
65525- return addr;
65526+ if (TASK_SIZE - len >= addr) {
65527+ vma = find_vma(mm, addr);
65528+ if (check_heap_stack_gap(vma, addr, len))
65529+ return addr;
65530+ }
65531 }
65532
65533 /* check if free_area_cache is useful for us */
65534@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
65535 /* make sure it can fit in the remaining address space */
65536 if (addr > len) {
65537 vma = find_vma(mm, addr-len);
65538- if (!vma || addr <= vma->vm_start)
65539+ if (check_heap_stack_gap(vma, addr - len, len))
65540 /* remember the address as a hint for next time */
65541 return (mm->free_area_cache = addr-len);
65542 }
65543@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
65544 * return with success:
65545 */
65546 vma = find_vma(mm, addr);
65547- if (!vma || addr+len <= vma->vm_start)
65548+ if (check_heap_stack_gap(vma, addr, len))
65549 /* remember the address as a hint for next time */
65550 return (mm->free_area_cache = addr);
65551
65552@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
65553 mm->cached_hole_size = vma->vm_start - addr;
65554
65555 /* try just below the current vma->vm_start */
65556- addr = vma->vm_start-len;
65557- } while (len < vma->vm_start);
65558+ addr = skip_heap_stack_gap(vma, len);
65559+ } while (!IS_ERR_VALUE(addr));
65560
65561 bottomup:
65562 /*
65563@@ -1414,13 +1624,21 @@ bottomup:
65564 * can happen with large stack limits and large mmap()
65565 * allocations.
65566 */
65567+ mm->mmap_base = TASK_UNMAPPED_BASE;
65568+
65569+#ifdef CONFIG_PAX_RANDMMAP
65570+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65571+ mm->mmap_base += mm->delta_mmap;
65572+#endif
65573+
65574+ mm->free_area_cache = mm->mmap_base;
65575 mm->cached_hole_size = ~0UL;
65576- mm->free_area_cache = TASK_UNMAPPED_BASE;
65577 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65578 /*
65579 * Restore the topdown base:
65580 */
65581- mm->free_area_cache = mm->mmap_base;
65582+ mm->mmap_base = base;
65583+ mm->free_area_cache = base;
65584 mm->cached_hole_size = ~0UL;
65585
65586 return addr;
65587@@ -1429,6 +1647,12 @@ bottomup:
65588
65589 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65590 {
65591+
65592+#ifdef CONFIG_PAX_SEGMEXEC
65593+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65594+ return;
65595+#endif
65596+
65597 /*
65598 * Is this a new hole at the highest possible address?
65599 */
65600@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
65601 mm->free_area_cache = addr;
65602
65603 /* dont allow allocations above current base */
65604- if (mm->free_area_cache > mm->mmap_base)
65605+ if (mm->free_area_cache > mm->mmap_base) {
65606 mm->free_area_cache = mm->mmap_base;
65607+ mm->cached_hole_size = ~0UL;
65608+ }
65609 }
65610
65611 unsigned long
65612@@ -1545,6 +1771,27 @@ out:
65613 return prev ? prev->vm_next : vma;
65614 }
65615
65616+#ifdef CONFIG_PAX_SEGMEXEC
65617+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65618+{
65619+ struct vm_area_struct *vma_m;
65620+
65621+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65622+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65623+ BUG_ON(vma->vm_mirror);
65624+ return NULL;
65625+ }
65626+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65627+ vma_m = vma->vm_mirror;
65628+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65629+ BUG_ON(vma->vm_file != vma_m->vm_file);
65630+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65631+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
65632+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65633+ return vma_m;
65634+}
65635+#endif
65636+
65637 /*
65638 * Verify that the stack growth is acceptable and
65639 * update accounting. This is shared with both the
65640@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
65641 return -ENOMEM;
65642
65643 /* Stack limit test */
65644+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
65645 if (size > rlim[RLIMIT_STACK].rlim_cur)
65646 return -ENOMEM;
65647
65648@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
65649 unsigned long limit;
65650 locked = mm->locked_vm + grow;
65651 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
65652+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65653 if (locked > limit && !capable(CAP_IPC_LOCK))
65654 return -ENOMEM;
65655 }
65656@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
65657 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65658 * vma is the last one with address > vma->vm_end. Have to extend vma.
65659 */
65660+#ifndef CONFIG_IA64
65661+static
65662+#endif
65663 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65664 {
65665 int error;
65666+ bool locknext;
65667
65668 if (!(vma->vm_flags & VM_GROWSUP))
65669 return -EFAULT;
65670
65671+ /* Also guard against wrapping around to address 0. */
65672+ if (address < PAGE_ALIGN(address+1))
65673+ address = PAGE_ALIGN(address+1);
65674+ else
65675+ return -ENOMEM;
65676+
65677 /*
65678 * We must make sure the anon_vma is allocated
65679 * so that the anon_vma locking is not a noop.
65680 */
65681 if (unlikely(anon_vma_prepare(vma)))
65682 return -ENOMEM;
65683+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65684+ if (locknext && anon_vma_prepare(vma->vm_next))
65685+ return -ENOMEM;
65686 anon_vma_lock(vma);
65687+ if (locknext)
65688+ anon_vma_lock(vma->vm_next);
65689
65690 /*
65691 * vma->vm_start/vm_end cannot change under us because the caller
65692 * is required to hold the mmap_sem in read mode. We need the
65693- * anon_vma lock to serialize against concurrent expand_stacks.
65694- * Also guard against wrapping around to address 0.
65695+ * anon_vma locks to serialize against concurrent expand_stacks
65696+ * and expand_upwards.
65697 */
65698- if (address < PAGE_ALIGN(address+4))
65699- address = PAGE_ALIGN(address+4);
65700- else {
65701- anon_vma_unlock(vma);
65702- return -ENOMEM;
65703- }
65704 error = 0;
65705
65706 /* Somebody else might have raced and expanded it already */
65707- if (address > vma->vm_end) {
65708+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65709+ error = -ENOMEM;
65710+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65711 unsigned long size, grow;
65712
65713 size = address - vma->vm_start;
65714@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65715 if (!error)
65716 vma->vm_end = address;
65717 }
65718+ if (locknext)
65719+ anon_vma_unlock(vma->vm_next);
65720 anon_vma_unlock(vma);
65721 return error;
65722 }
65723@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65724 unsigned long address)
65725 {
65726 int error;
65727+ bool lockprev = false;
65728+ struct vm_area_struct *prev;
65729
65730 /*
65731 * We must make sure the anon_vma is allocated
65732@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65733 if (error)
65734 return error;
65735
65736+ prev = vma->vm_prev;
65737+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65738+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65739+#endif
65740+ if (lockprev && anon_vma_prepare(prev))
65741+ return -ENOMEM;
65742+ if (lockprev)
65743+ anon_vma_lock(prev);
65744+
65745 anon_vma_lock(vma);
65746
65747 /*
65748@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65749 */
65750
65751 /* Somebody else might have raced and expanded it already */
65752- if (address < vma->vm_start) {
65753+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65754+ error = -ENOMEM;
65755+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65756 unsigned long size, grow;
65757
65758+#ifdef CONFIG_PAX_SEGMEXEC
65759+ struct vm_area_struct *vma_m;
65760+
65761+ vma_m = pax_find_mirror_vma(vma);
65762+#endif
65763+
65764 size = vma->vm_end - address;
65765 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65766
65767@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65768 if (!error) {
65769 vma->vm_start = address;
65770 vma->vm_pgoff -= grow;
65771+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65772+
65773+#ifdef CONFIG_PAX_SEGMEXEC
65774+ if (vma_m) {
65775+ vma_m->vm_start -= grow << PAGE_SHIFT;
65776+ vma_m->vm_pgoff -= grow;
65777+ }
65778+#endif
65779+
65780 }
65781 }
65782 anon_vma_unlock(vma);
65783+ if (lockprev)
65784+ anon_vma_unlock(prev);
65785 return error;
65786 }
65787
65788@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65789 do {
65790 long nrpages = vma_pages(vma);
65791
65792+#ifdef CONFIG_PAX_SEGMEXEC
65793+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65794+ vma = remove_vma(vma);
65795+ continue;
65796+ }
65797+#endif
65798+
65799 mm->total_vm -= nrpages;
65800 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65801 vma = remove_vma(vma);
65802@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65803 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65804 vma->vm_prev = NULL;
65805 do {
65806+
65807+#ifdef CONFIG_PAX_SEGMEXEC
65808+ if (vma->vm_mirror) {
65809+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65810+ vma->vm_mirror->vm_mirror = NULL;
65811+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
65812+ vma->vm_mirror = NULL;
65813+ }
65814+#endif
65815+
65816 rb_erase(&vma->vm_rb, &mm->mm_rb);
65817 mm->map_count--;
65818 tail_vma = vma;
65819@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65820 struct mempolicy *pol;
65821 struct vm_area_struct *new;
65822
65823+#ifdef CONFIG_PAX_SEGMEXEC
65824+ struct vm_area_struct *vma_m, *new_m = NULL;
65825+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65826+#endif
65827+
65828 if (is_vm_hugetlb_page(vma) && (addr &
65829 ~(huge_page_mask(hstate_vma(vma)))))
65830 return -EINVAL;
65831
65832+#ifdef CONFIG_PAX_SEGMEXEC
65833+ vma_m = pax_find_mirror_vma(vma);
65834+
65835+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65836+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65837+ if (mm->map_count >= sysctl_max_map_count-1)
65838+ return -ENOMEM;
65839+ } else
65840+#endif
65841+
65842 if (mm->map_count >= sysctl_max_map_count)
65843 return -ENOMEM;
65844
65845@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65846 if (!new)
65847 return -ENOMEM;
65848
65849+#ifdef CONFIG_PAX_SEGMEXEC
65850+ if (vma_m) {
65851+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65852+ if (!new_m) {
65853+ kmem_cache_free(vm_area_cachep, new);
65854+ return -ENOMEM;
65855+ }
65856+ }
65857+#endif
65858+
65859 /* most fields are the same, copy all, and then fixup */
65860 *new = *vma;
65861
65862@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65863 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65864 }
65865
65866+#ifdef CONFIG_PAX_SEGMEXEC
65867+ if (vma_m) {
65868+ *new_m = *vma_m;
65869+ new_m->vm_mirror = new;
65870+ new->vm_mirror = new_m;
65871+
65872+ if (new_below)
65873+ new_m->vm_end = addr_m;
65874+ else {
65875+ new_m->vm_start = addr_m;
65876+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65877+ }
65878+ }
65879+#endif
65880+
65881 pol = mpol_dup(vma_policy(vma));
65882 if (IS_ERR(pol)) {
65883+
65884+#ifdef CONFIG_PAX_SEGMEXEC
65885+ if (new_m)
65886+ kmem_cache_free(vm_area_cachep, new_m);
65887+#endif
65888+
65889 kmem_cache_free(vm_area_cachep, new);
65890 return PTR_ERR(pol);
65891 }
65892@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65893 else
65894 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65895
65896+#ifdef CONFIG_PAX_SEGMEXEC
65897+ if (vma_m) {
65898+ mpol_get(pol);
65899+ vma_set_policy(new_m, pol);
65900+
65901+ if (new_m->vm_file) {
65902+ get_file(new_m->vm_file);
65903+ if (vma_m->vm_flags & VM_EXECUTABLE)
65904+ added_exe_file_vma(mm);
65905+ }
65906+
65907+ if (new_m->vm_ops && new_m->vm_ops->open)
65908+ new_m->vm_ops->open(new_m);
65909+
65910+ if (new_below)
65911+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65912+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65913+ else
65914+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65915+ }
65916+#endif
65917+
65918 return 0;
65919 }
65920
65921@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65922 * work. This now handles partial unmappings.
65923 * Jeremy Fitzhardinge <jeremy@goop.org>
65924 */
65925+#ifdef CONFIG_PAX_SEGMEXEC
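+/*
+ * With SEGMEXEC active, unmapping [start, start+len) must also drop the
+ * mirror range at start + SEGMEXEC_TASK_SIZE, so do_munmap() becomes a
+ * wrapper that applies __do_munmap() to both halves.
+ */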
65926+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65927+{
65928+ int ret = __do_munmap(mm, start, len);
65929+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65930+ return ret;
65931+
65932+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65933+}
65934+
65935+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65936+#else
65937 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65938+#endif
65939 {
65940 unsigned long end;
65941 struct vm_area_struct *vma, *prev, *last;
65942
65943+ /*
65944+ * mm->mmap_sem is required to protect against another thread
65945+ * changing the mappings in case we sleep.
65946+ */
65947+ verify_mm_writelocked(mm);
65948+
65949 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65950 return -EINVAL;
65951
65952@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65953 /* Fix up all other VM information */
65954 remove_vma_list(mm, vma);
65955
65956+ track_exec_limit(mm, start, end, 0UL);
65957+
65958 return 0;
65959 }
65960
65961@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65962
65963 profile_munmap(addr);
65964
65965+#ifdef CONFIG_PAX_SEGMEXEC
65966+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65967+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65968+ return -EINVAL;
65969+#endif
65970+
65971 down_write(&mm->mmap_sem);
65972 ret = do_munmap(mm, addr, len);
65973 up_write(&mm->mmap_sem);
65974 return ret;
65975 }
65976
65977-static inline void verify_mm_writelocked(struct mm_struct *mm)
65978-{
65979-#ifdef CONFIG_DEBUG_VM
65980- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65981- WARN_ON(1);
65982- up_read(&mm->mmap_sem);
65983- }
65984-#endif
65985-}
65986-
65987 /*
65988 * this is really a simplified "do_mmap". it only handles
65989 * anonymous maps. eventually we may be able to do some
65990@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65991 struct rb_node ** rb_link, * rb_parent;
65992 pgoff_t pgoff = addr >> PAGE_SHIFT;
65993 int error;
65994+ unsigned long charged;
65995
65996 len = PAGE_ALIGN(len);
65997 if (!len)
65998@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65999
66000 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
66001
66002+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66003+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66004+ flags &= ~VM_EXEC;
66005+
66006+#ifdef CONFIG_PAX_MPROTECT
66007+ if (mm->pax_flags & MF_PAX_MPROTECT)
66008+ flags &= ~VM_MAYEXEC;
66009+#endif
66010+
66011+ }
66012+#endif
66013+
66014 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
66015 if (error & ~PAGE_MASK)
66016 return error;
66017
66018+ charged = len >> PAGE_SHIFT;
66019+
66020 /*
66021 * mlock MCL_FUTURE?
66022 */
66023 if (mm->def_flags & VM_LOCKED) {
66024 unsigned long locked, lock_limit;
66025- locked = len >> PAGE_SHIFT;
66026+ locked = charged;
66027 locked += mm->locked_vm;
66028 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
66029 lock_limit >>= PAGE_SHIFT;
66030@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
66031 /*
66032 * Clear old maps. this also does some error checking for us
66033 */
66034- munmap_back:
66035 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66036 if (vma && vma->vm_start < addr + len) {
66037 if (do_munmap(mm, addr, len))
66038 return -ENOMEM;
66039- goto munmap_back;
66040+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66041+ BUG_ON(vma && vma->vm_start < addr + len);
66042 }
66043
66044 /* Check against address space limits *after* clearing old maps... */
66045- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
66046+ if (!may_expand_vm(mm, charged))
66047 return -ENOMEM;
66048
66049 if (mm->map_count > sysctl_max_map_count)
66050 return -ENOMEM;
66051
66052- if (security_vm_enough_memory(len >> PAGE_SHIFT))
66053+ if (security_vm_enough_memory(charged))
66054 return -ENOMEM;
66055
66056 /* Can we just expand an old private anonymous mapping? */
66057@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
66058 */
66059 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66060 if (!vma) {
66061- vm_unacct_memory(len >> PAGE_SHIFT);
66062+ vm_unacct_memory(charged);
66063 return -ENOMEM;
66064 }
66065
66066@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
66067 vma->vm_page_prot = vm_get_page_prot(flags);
66068 vma_link(mm, vma, prev, rb_link, rb_parent);
66069 out:
66070- mm->total_vm += len >> PAGE_SHIFT;
66071+ mm->total_vm += charged;
66072 if (flags & VM_LOCKED) {
66073 if (!mlock_vma_pages_range(vma, addr, addr + len))
66074- mm->locked_vm += (len >> PAGE_SHIFT);
66075+ mm->locked_vm += charged;
66076 }
66077+ track_exec_limit(mm, addr, addr + len, flags);
66078 return addr;
66079 }
66080
66081@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
66082 * Walk the list again, actually closing and freeing it,
66083 * with preemption enabled, without holding any MM locks.
66084 */
66085- while (vma)
66086+ while (vma) {
66087+ vma->vm_mirror = NULL;
66088 vma = remove_vma(vma);
66089+ }
66090
66091 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
66092 }
66093@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
66094 struct vm_area_struct * __vma, * prev;
66095 struct rb_node ** rb_link, * rb_parent;
66096
66097+#ifdef CONFIG_PAX_SEGMEXEC
66098+ struct vm_area_struct *vma_m = NULL;
66099+#endif
66100+
66101 /*
66102 * The vm_pgoff of a purely anonymous vma should be irrelevant
66103 * until its first write fault, when page's anon_vma and index
66104@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
66105 if ((vma->vm_flags & VM_ACCOUNT) &&
66106 security_vm_enough_memory_mm(mm, vma_pages(vma)))
66107 return -ENOMEM;
66108+
66109+#ifdef CONFIG_PAX_SEGMEXEC
66110+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
66111+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66112+ if (!vma_m)
66113+ return -ENOMEM;
66114+ }
66115+#endif
66116+
66117 vma_link(mm, vma, prev, rb_link, rb_parent);
66118+
66119+#ifdef CONFIG_PAX_SEGMEXEC
66120+ if (vma_m)
66121+ pax_mirror_vma(vma_m, vma);
66122+#endif
66123+
66124 return 0;
66125 }
66126
66127@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
66128 struct rb_node **rb_link, *rb_parent;
66129 struct mempolicy *pol;
66130
66131+ BUG_ON(vma->vm_mirror);
66132+
66133 /*
66134 * If anonymous vma has not yet been faulted, update new pgoff
66135 * to match new location, to increase its chance of merging.
66136@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
66137 return new_vma;
66138 }
66139
66140+#ifdef CONFIG_PAX_SEGMEXEC
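+/*
+ * Populate vma_m as the SEGMEXEC mirror of vma: a copy shifted up by
+ * SEGMEXEC_TASK_SIZE with the write bits cleared, linked into the mm and
+ * cross-referenced with vma through vm_mirror.
+ */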
66141+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
66142+{
66143+ struct vm_area_struct *prev_m;
66144+ struct rb_node **rb_link_m, *rb_parent_m;
66145+ struct mempolicy *pol_m;
66146+
66147+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
66148+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
66149+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
66150+ *vma_m = *vma;
66151+ pol_m = vma_policy(vma_m);
66152+ mpol_get(pol_m);
66153+ vma_set_policy(vma_m, pol_m);
66154+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
66155+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
66156+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
66157+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
66158+ if (vma_m->vm_file)
66159+ get_file(vma_m->vm_file);
66160+ if (vma_m->vm_ops && vma_m->vm_ops->open)
66161+ vma_m->vm_ops->open(vma_m);
66162+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
66163+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
66164+ vma_m->vm_mirror = vma;
66165+ vma->vm_mirror = vma_m;
66166+}
66167+#endif
66168+
66169 /*
66170 * Return true if the calling process may expand its vm space by the passed
66171 * number of pages
66172@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
66173 unsigned long lim;
66174
66175 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
66176-
66177+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
66178 if (cur + npages > lim)
66179 return 0;
66180 return 1;
66181@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
66182 vma->vm_start = addr;
66183 vma->vm_end = addr + len;
66184
66185+#ifdef CONFIG_PAX_MPROTECT
66186+ if (mm->pax_flags & MF_PAX_MPROTECT) {
66187+#ifndef CONFIG_PAX_MPROTECT_COMPAT
66188+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
66189+ return -EPERM;
66190+ if (!(vm_flags & VM_EXEC))
66191+ vm_flags &= ~VM_MAYEXEC;
66192+#else
66193+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66194+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66195+#endif
66196+ else
66197+ vm_flags &= ~VM_MAYWRITE;
66198+ }
66199+#endif
66200+
66201 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66202 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66203
66204diff -urNp linux-2.6.32.42/mm/mprotect.c linux-2.6.32.42/mm/mprotect.c
66205--- linux-2.6.32.42/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
66206+++ linux-2.6.32.42/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
66207@@ -24,10 +24,16 @@
66208 #include <linux/mmu_notifier.h>
66209 #include <linux/migrate.h>
66210 #include <linux/perf_event.h>
66211+
66212+#ifdef CONFIG_PAX_MPROTECT
66213+#include <linux/elf.h>
66214+#endif
66215+
66216 #include <asm/uaccess.h>
66217 #include <asm/pgtable.h>
66218 #include <asm/cacheflush.h>
66219 #include <asm/tlbflush.h>
66220+#include <asm/mmu_context.h>
66221
66222 #ifndef pgprot_modify
66223 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66224@@ -132,6 +138,48 @@ static void change_protection(struct vm_
66225 flush_tlb_range(vma, start, end);
66226 }
66227
66228+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66229+/* called while holding the mmap semaphore for writing, except during stack expansion */
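+/*
+ * Keep mm->context.user_cs_limit in sync with the highest executable
+ * mapping so that the segment-limit based PAGEEXEC emulation (used only
+ * when hardware NX is unavailable) covers exactly the executable part of
+ * the address space.
+ */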
66230+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66231+{
66232+ unsigned long oldlimit, newlimit = 0UL;
66233+
66234+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
66235+ return;
66236+
66237+ spin_lock(&mm->page_table_lock);
66238+ oldlimit = mm->context.user_cs_limit;
66239+ if ((prot & VM_EXEC) && oldlimit < end)
66240+ /* USER_CS limit moved up */
66241+ newlimit = end;
66242+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66243+ /* USER_CS limit moved down */
66244+ newlimit = start;
66245+
66246+ if (newlimit) {
66247+ mm->context.user_cs_limit = newlimit;
66248+
66249+#ifdef CONFIG_SMP
66250+ wmb();
66251+ cpus_clear(mm->context.cpu_user_cs_mask);
66252+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66253+#endif
66254+
66255+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66256+ }
66257+ spin_unlock(&mm->page_table_lock);
66258+ if (newlimit == end) {
66259+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
66260+
66261+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
66262+ if (is_vm_hugetlb_page(vma))
66263+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66264+ else
66265+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66266+ }
66267+}
66268+#endif
66269+
66270 int
66271 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66272 unsigned long start, unsigned long end, unsigned long newflags)
66273@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
66274 int error;
66275 int dirty_accountable = 0;
66276
66277+#ifdef CONFIG_PAX_SEGMEXEC
66278+ struct vm_area_struct *vma_m = NULL;
66279+ unsigned long start_m, end_m;
66280+
66281+ start_m = start + SEGMEXEC_TASK_SIZE;
66282+ end_m = end + SEGMEXEC_TASK_SIZE;
66283+#endif
66284+
66285 if (newflags == oldflags) {
66286 *pprev = vma;
66287 return 0;
66288 }
66289
66290+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66291+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66292+
66293+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66294+ return -ENOMEM;
66295+
66296+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66297+ return -ENOMEM;
66298+ }
66299+
66300 /*
66301 * If we make a private mapping writable we increase our commit;
66302 * but (without finer accounting) cannot reduce our commit if we
66303@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
66304 }
66305 }
66306
66307+#ifdef CONFIG_PAX_SEGMEXEC
66308+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66309+ if (start != vma->vm_start) {
66310+ error = split_vma(mm, vma, start, 1);
66311+ if (error)
66312+ goto fail;
66313+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66314+ *pprev = (*pprev)->vm_next;
66315+ }
66316+
66317+ if (end != vma->vm_end) {
66318+ error = split_vma(mm, vma, end, 0);
66319+ if (error)
66320+ goto fail;
66321+ }
66322+
66323+ if (pax_find_mirror_vma(vma)) {
66324+ error = __do_munmap(mm, start_m, end_m - start_m);
66325+ if (error)
66326+ goto fail;
66327+ } else {
66328+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66329+ if (!vma_m) {
66330+ error = -ENOMEM;
66331+ goto fail;
66332+ }
66333+ vma->vm_flags = newflags;
66334+ pax_mirror_vma(vma_m, vma);
66335+ }
66336+ }
66337+#endif
66338+
66339 /*
66340 * First try to merge with previous and/or next vma.
66341 */
66342@@ -195,9 +293,21 @@ success:
66343 * vm_flags and vm_page_prot are protected by the mmap_sem
66344 * held in write mode.
66345 */
66346+
66347+#ifdef CONFIG_PAX_SEGMEXEC
66348+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66349+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66350+#endif
66351+
66352 vma->vm_flags = newflags;
66353+
66354+#ifdef CONFIG_PAX_MPROTECT
66355+ if (mm->binfmt && mm->binfmt->handle_mprotect)
66356+ mm->binfmt->handle_mprotect(vma, newflags);
66357+#endif
66358+
66359 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66360- vm_get_page_prot(newflags));
66361+ vm_get_page_prot(vma->vm_flags));
66362
66363 if (vma_wants_writenotify(vma)) {
66364 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66365@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66366 end = start + len;
66367 if (end <= start)
66368 return -ENOMEM;
66369+
66370+#ifdef CONFIG_PAX_SEGMEXEC
66371+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66372+ if (end > SEGMEXEC_TASK_SIZE)
66373+ return -EINVAL;
66374+ } else
66375+#endif
66376+
66377+ if (end > TASK_SIZE)
66378+ return -EINVAL;
66379+
66380 if (!arch_validate_prot(prot))
66381 return -EINVAL;
66382
66383@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66384 /*
66385 * Does the application expect PROT_READ to imply PROT_EXEC:
66386 */
66387- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66388+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66389 prot |= PROT_EXEC;
66390
66391 vm_flags = calc_vm_prot_bits(prot);
66392@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66393 if (start > vma->vm_start)
66394 prev = vma;
66395
66396+#ifdef CONFIG_PAX_MPROTECT
66397+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66398+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
66399+#endif
66400+
66401 for (nstart = start ; ; ) {
66402 unsigned long newflags;
66403
66404@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66405
66406 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66407 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66408+ if (prot & (PROT_WRITE | PROT_EXEC))
66409+ gr_log_rwxmprotect(vma->vm_file);
66410+
66411+ error = -EACCES;
66412+ goto out;
66413+ }
66414+
66415+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66416 error = -EACCES;
66417 goto out;
66418 }
66419@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66420 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66421 if (error)
66422 goto out;
66423+
66424+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
66425+
66426 nstart = tmp;
66427
66428 if (nstart < prev->vm_end)
66429diff -urNp linux-2.6.32.42/mm/mremap.c linux-2.6.32.42/mm/mremap.c
66430--- linux-2.6.32.42/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
66431+++ linux-2.6.32.42/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
66432@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
66433 continue;
66434 pte = ptep_clear_flush(vma, old_addr, old_pte);
66435 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66436+
66437+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66438+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66439+ pte = pte_exprotect(pte);
66440+#endif
66441+
66442 set_pte_at(mm, new_addr, new_pte, pte);
66443 }
66444
66445@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
66446 if (is_vm_hugetlb_page(vma))
66447 goto Einval;
66448
66449+#ifdef CONFIG_PAX_SEGMEXEC
66450+ if (pax_find_mirror_vma(vma))
66451+ goto Einval;
66452+#endif
66453+
66454 /* We can't remap across vm area boundaries */
66455 if (old_len > vma->vm_end - addr)
66456 goto Efault;
66457@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
66458 unsigned long ret = -EINVAL;
66459 unsigned long charged = 0;
66460 unsigned long map_flags;
66461+ unsigned long pax_task_size = TASK_SIZE;
66462
66463 if (new_addr & ~PAGE_MASK)
66464 goto out;
66465
66466- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66467+#ifdef CONFIG_PAX_SEGMEXEC
66468+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66469+ pax_task_size = SEGMEXEC_TASK_SIZE;
66470+#endif
66471+
66472+ pax_task_size -= PAGE_SIZE;
66473+
66474+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66475 goto out;
66476
66477 /* Check if the location we're moving into overlaps the
66478 * old location at all, and fail if it does.
66479 */
66480- if ((new_addr <= addr) && (new_addr+new_len) > addr)
66481- goto out;
66482-
66483- if ((addr <= new_addr) && (addr+old_len) > new_addr)
66484+ if (addr + old_len > new_addr && new_addr + new_len > addr)
66485 goto out;
66486
66487 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66488@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
66489 struct vm_area_struct *vma;
66490 unsigned long ret = -EINVAL;
66491 unsigned long charged = 0;
66492+ unsigned long pax_task_size = TASK_SIZE;
66493
66494 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66495 goto out;
66496@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
66497 if (!new_len)
66498 goto out;
66499
66500+#ifdef CONFIG_PAX_SEGMEXEC
66501+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66502+ pax_task_size = SEGMEXEC_TASK_SIZE;
66503+#endif
66504+
66505+ pax_task_size -= PAGE_SIZE;
66506+
66507+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66508+ old_len > pax_task_size || addr > pax_task_size-old_len)
66509+ goto out;
66510+
66511 if (flags & MREMAP_FIXED) {
66512 if (flags & MREMAP_MAYMOVE)
66513 ret = mremap_to(addr, old_len, new_addr, new_len);
66514@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
66515 addr + new_len);
66516 }
66517 ret = addr;
66518+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66519 goto out;
66520 }
66521 }
66522@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
66523 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66524 if (ret)
66525 goto out;
66526+
66527+ map_flags = vma->vm_flags;
66528 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66529+ if (!(ret & ~PAGE_MASK)) {
66530+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66531+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66532+ }
66533 }
66534 out:
66535 if (ret & ~PAGE_MASK)
66536diff -urNp linux-2.6.32.42/mm/nommu.c linux-2.6.32.42/mm/nommu.c
66537--- linux-2.6.32.42/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
66538+++ linux-2.6.32.42/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
66539@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66540 int sysctl_overcommit_ratio = 50; /* default is 50% */
66541 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66542 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66543-int heap_stack_gap = 0;
66544
66545 atomic_long_t mmap_pages_allocated;
66546
66547@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
66548 EXPORT_SYMBOL(find_vma);
66549
66550 /*
66551- * find a VMA
66552- * - we don't extend stack VMAs under NOMMU conditions
66553- */
66554-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66555-{
66556- return find_vma(mm, addr);
66557-}
66558-
66559-/*
66560 * expand a stack to a given address
66561 * - not supported under NOMMU conditions
66562 */
66563diff -urNp linux-2.6.32.42/mm/page_alloc.c linux-2.6.32.42/mm/page_alloc.c
66564--- linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
66565+++ linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:56:37.000000000 -0400
66566@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
66567 int bad = 0;
66568 int wasMlocked = __TestClearPageMlocked(page);
66569
66570+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66571+ unsigned long index = 1UL << order;
66572+#endif
66573+
66574 kmemcheck_free_shadow(page, order);
66575
66576 for (i = 0 ; i < (1 << order) ; ++i)
66577@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
66578 debug_check_no_obj_freed(page_address(page),
66579 PAGE_SIZE << order);
66580 }
66581+
66582+#ifdef CONFIG_PAX_MEMORY_SANITIZE
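+ /*
+ * Scrub each page of the freed block so its previous contents cannot
+ * leak on reuse; prep_new_page() skips the __GFP_ZERO pre-clearing
+ * under this config because freed pages are sanitized here.
+ */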
66583+ for (; index; --index)
66584+ sanitize_highpage(page + index - 1);
66585+#endif
66586+
66587 arch_free_page(page, order);
66588 kernel_map_pages(page, 1 << order, 0);
66589
66590@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
66591 arch_alloc_page(page, order);
66592 kernel_map_pages(page, 1 << order, 1);
66593
66594+#ifndef CONFIG_PAX_MEMORY_SANITIZE
66595 if (gfp_flags & __GFP_ZERO)
66596 prep_zero_page(page, order, gfp_flags);
66597+#endif
66598
66599 if (order && (gfp_flags & __GFP_COMP))
66600 prep_compound_page(page, order);
66601@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
66602 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
66603 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
66604 }
66605+
66606+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66607+ sanitize_highpage(page);
66608+#endif
66609+
66610 arch_free_page(page, 0);
66611 kernel_map_pages(page, 1, 0);
66612
66613@@ -2179,6 +2196,8 @@ void show_free_areas(void)
66614 int cpu;
66615 struct zone *zone;
66616
66617+ pax_track_stack();
66618+
66619 for_each_populated_zone(zone) {
66620 show_node(zone);
66621 printk("%s per-cpu:\n", zone->name);
66622@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
66623 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
66624 }
66625 #else
66626-static void inline setup_usemap(struct pglist_data *pgdat,
66627+static inline void setup_usemap(struct pglist_data *pgdat,
66628 struct zone *zone, unsigned long zonesize) {}
66629 #endif /* CONFIG_SPARSEMEM */
66630
66631diff -urNp linux-2.6.32.42/mm/percpu.c linux-2.6.32.42/mm/percpu.c
66632--- linux-2.6.32.42/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
66633+++ linux-2.6.32.42/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
66634@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
66635 static unsigned int pcpu_last_unit_cpu __read_mostly;
66636
66637 /* the address of the first chunk which starts with the kernel static area */
66638-void *pcpu_base_addr __read_mostly;
66639+void *pcpu_base_addr __read_only;
66640 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66641
66642 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66643diff -urNp linux-2.6.32.42/mm/rmap.c linux-2.6.32.42/mm/rmap.c
66644--- linux-2.6.32.42/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
66645+++ linux-2.6.32.42/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
66646@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
66647 /* page_table_lock to protect against threads */
66648 spin_lock(&mm->page_table_lock);
66649 if (likely(!vma->anon_vma)) {
66650+
66651+#ifdef CONFIG_PAX_SEGMEXEC
66652+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66653+
66654+ if (vma_m) {
66655+ BUG_ON(vma_m->anon_vma);
66656+ vma_m->anon_vma = anon_vma;
66657+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
66658+ }
66659+#endif
66660+
66661 vma->anon_vma = anon_vma;
66662 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
66663 allocated = NULL;
66664diff -urNp linux-2.6.32.42/mm/shmem.c linux-2.6.32.42/mm/shmem.c
66665--- linux-2.6.32.42/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
66666+++ linux-2.6.32.42/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
66667@@ -31,7 +31,7 @@
66668 #include <linux/swap.h>
66669 #include <linux/ima.h>
66670
66671-static struct vfsmount *shm_mnt;
66672+struct vfsmount *shm_mnt;
66673
66674 #ifdef CONFIG_SHMEM
66675 /*
66676@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
66677 goto unlock;
66678 }
66679 entry = shmem_swp_entry(info, index, NULL);
66680+ if (!entry)
66681+ goto unlock;
66682 if (entry->val) {
66683 /*
66684 * The more uptodate page coming down from a stacked
66685@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
66686 struct vm_area_struct pvma;
66687 struct page *page;
66688
66689+ pax_track_stack();
66690+
66691 spol = mpol_cond_copy(&mpol,
66692 mpol_shared_policy_lookup(&info->policy, idx));
66693
66694@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
66695
66696 info = SHMEM_I(inode);
66697 inode->i_size = len-1;
66698- if (len <= (char *)inode - (char *)info) {
66699+ if (len <= (char *)inode - (char *)info && len <= 64) {
66700 /* do it inline */
66701 memcpy(info, symname, len);
66702 inode->i_op = &shmem_symlink_inline_operations;
66703@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
66704 int err = -ENOMEM;
66705
66706 /* Round up to L1_CACHE_BYTES to resist false sharing */
66707- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66708- L1_CACHE_BYTES), GFP_KERNEL);
66709+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66710 if (!sbinfo)
66711 return -ENOMEM;
66712
66713diff -urNp linux-2.6.32.42/mm/slab.c linux-2.6.32.42/mm/slab.c
66714--- linux-2.6.32.42/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66715+++ linux-2.6.32.42/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66716@@ -174,7 +174,7 @@
66717
66718 /* Legal flag mask for kmem_cache_create(). */
66719 #if DEBUG
66720-# define CREATE_MASK (SLAB_RED_ZONE | \
66721+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66722 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66723 SLAB_CACHE_DMA | \
66724 SLAB_STORE_USER | \
66725@@ -182,7 +182,7 @@
66726 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66727 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66728 #else
66729-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66730+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66731 SLAB_CACHE_DMA | \
66732 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66733 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66734@@ -308,7 +308,7 @@ struct kmem_list3 {
66735 * Need this for bootstrapping a per node allocator.
66736 */
66737 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66738-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66739+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66740 #define CACHE_CACHE 0
66741 #define SIZE_AC MAX_NUMNODES
66742 #define SIZE_L3 (2 * MAX_NUMNODES)
66743@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66744 if ((x)->max_freeable < i) \
66745 (x)->max_freeable = i; \
66746 } while (0)
66747-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66748-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66749-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66750-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66751+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66752+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66753+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66754+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66755 #else
66756 #define STATS_INC_ACTIVE(x) do { } while (0)
66757 #define STATS_DEC_ACTIVE(x) do { } while (0)
66758@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66759 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66760 */
66761 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66762- const struct slab *slab, void *obj)
66763+ const struct slab *slab, const void *obj)
66764 {
66765 u32 offset = (obj - slab->s_mem);
66766 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66767@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66768 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66769 sizes[INDEX_AC].cs_size,
66770 ARCH_KMALLOC_MINALIGN,
66771- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66772+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66773 NULL);
66774
66775 if (INDEX_AC != INDEX_L3) {
66776@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66777 kmem_cache_create(names[INDEX_L3].name,
66778 sizes[INDEX_L3].cs_size,
66779 ARCH_KMALLOC_MINALIGN,
66780- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66781+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66782 NULL);
66783 }
66784
66785@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66786 sizes->cs_cachep = kmem_cache_create(names->name,
66787 sizes->cs_size,
66788 ARCH_KMALLOC_MINALIGN,
66789- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66790+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66791 NULL);
66792 }
66793 #ifdef CONFIG_ZONE_DMA
66794@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66795 }
66796 /* cpu stats */
66797 {
66798- unsigned long allochit = atomic_read(&cachep->allochit);
66799- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66800- unsigned long freehit = atomic_read(&cachep->freehit);
66801- unsigned long freemiss = atomic_read(&cachep->freemiss);
66802+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66803+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66804+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66805+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66806
66807 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66808 allochit, allocmiss, freehit, freemiss);
66809@@ -4471,15 +4471,66 @@ static const struct file_operations proc
66810
66811 static int __init slab_proc_init(void)
66812 {
66813- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66814+ mode_t gr_mode = S_IRUGO;
66815+
66816+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66817+ gr_mode = S_IRUSR;
66818+#endif
66819+
66820+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66821 #ifdef CONFIG_DEBUG_SLAB_LEAK
66822- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66823+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66824 #endif
66825 return 0;
66826 }
66827 module_init(slab_proc_init);
66828 #endif
66829
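+/*
+ * PAX_USERCOPY check for SLAB: a usercopy of n bytes at ptr must fit
+ * entirely within one object of a SLAB_USERCOPY-flagged cache; bad
+ * pointers and out-of-bounds spans are reported via pax_report_usercopy().
+ */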
66830+void check_object_size(const void *ptr, unsigned long n, bool to)
66831+{
66832+
66833+#ifdef CONFIG_PAX_USERCOPY
66834+ struct page *page;
66835+ struct kmem_cache *cachep = NULL;
66836+ struct slab *slabp;
66837+ unsigned int objnr;
66838+ unsigned long offset;
66839+
66840+ if (!n)
66841+ return;
66842+
66843+ if (ZERO_OR_NULL_PTR(ptr))
66844+ goto report;
66845+
66846+ if (!virt_addr_valid(ptr))
66847+ return;
66848+
66849+ page = virt_to_head_page(ptr);
66850+
66851+ if (!PageSlab(page)) {
66852+ if (object_is_on_stack(ptr, n) == -1)
66853+ goto report;
66854+ return;
66855+ }
66856+
66857+ cachep = page_get_cache(page);
66858+ if (!(cachep->flags & SLAB_USERCOPY))
66859+ goto report;
66860+
66861+ slabp = page_get_slab(page);
66862+ objnr = obj_to_index(cachep, slabp, ptr);
66863+ BUG_ON(objnr >= cachep->num);
66864+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66865+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66866+ return;
66867+
66868+report:
66869+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66870+#endif
66871+
66872+}
66873+EXPORT_SYMBOL(check_object_size);
66874+
66875 /**
66876 * ksize - get the actual amount of memory allocated for a given object
66877 * @objp: Pointer to the object
66878diff -urNp linux-2.6.32.42/mm/slob.c linux-2.6.32.42/mm/slob.c
66879--- linux-2.6.32.42/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66880+++ linux-2.6.32.42/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
66881@@ -29,7 +29,7 @@
66882 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66883 * alloc_pages() directly, allocating compound pages so the page order
66884 * does not have to be separately tracked, and also stores the exact
66885- * allocation size in page->private so that it can be used to accurately
66886+ * allocation size in slob_page->size so that it can be used to accurately
66887 * provide ksize(). These objects are detected in kfree() because slob_page()
66888 * is false for them.
66889 *
66890@@ -58,6 +58,7 @@
66891 */
66892
66893 #include <linux/kernel.h>
66894+#include <linux/sched.h>
66895 #include <linux/slab.h>
66896 #include <linux/mm.h>
66897 #include <linux/swap.h> /* struct reclaim_state */
66898@@ -100,7 +101,8 @@ struct slob_page {
66899 unsigned long flags; /* mandatory */
66900 atomic_t _count; /* mandatory */
66901 slobidx_t units; /* free units left in page */
66902- unsigned long pad[2];
66903+ unsigned long pad[1];
66904+ unsigned long size; /* size when >=PAGE_SIZE */
66905 slob_t *free; /* first free slob_t in page */
66906 struct list_head list; /* linked list of free pages */
66907 };
66908@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66909 */
66910 static inline int is_slob_page(struct slob_page *sp)
66911 {
66912- return PageSlab((struct page *)sp);
66913+ return PageSlab((struct page *)sp) && !sp->size;
66914 }
66915
66916 static inline void set_slob_page(struct slob_page *sp)
66917@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66918
66919 static inline struct slob_page *slob_page(const void *addr)
66920 {
66921- return (struct slob_page *)virt_to_page(addr);
66922+ return (struct slob_page *)virt_to_head_page(addr);
66923 }
66924
66925 /*
66926@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66927 /*
66928 * Return the size of a slob block.
66929 */
66930-static slobidx_t slob_units(slob_t *s)
66931+static slobidx_t slob_units(const slob_t *s)
66932 {
66933 if (s->units > 0)
66934 return s->units;
66935@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66936 /*
66937 * Return the next free slob block pointer after this one.
66938 */
66939-static slob_t *slob_next(slob_t *s)
66940+static slob_t *slob_next(const slob_t *s)
66941 {
66942 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66943 slobidx_t next;
66944@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66945 /*
66946 * Returns true if s is the last free block in its page.
66947 */
66948-static int slob_last(slob_t *s)
66949+static int slob_last(const slob_t *s)
66950 {
66951 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66952 }
66953@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66954 if (!page)
66955 return NULL;
66956
66957+ set_slob_page(page);
66958 return page_address(page);
66959 }
66960
66961@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66962 if (!b)
66963 return NULL;
66964 sp = slob_page(b);
66965- set_slob_page(sp);
66966
66967 spin_lock_irqsave(&slob_lock, flags);
66968 sp->units = SLOB_UNITS(PAGE_SIZE);
66969 sp->free = b;
66970+ sp->size = 0;
66971 INIT_LIST_HEAD(&sp->list);
66972 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66973 set_slob_page_free(sp, slob_list);
66974@@ -475,10 +478,9 @@ out:
66975 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66976 #endif
66977
66978-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66979+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66980 {
66981- unsigned int *m;
66982- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66983+ slob_t *m;
66984 void *ret;
66985
66986 lockdep_trace_alloc(gfp);
66987@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66988
66989 if (!m)
66990 return NULL;
66991- *m = size;
66992+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66993+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66994+ m[0].units = size;
66995+ m[1].units = align;
66996 ret = (void *)m + align;
66997
66998 trace_kmalloc_node(_RET_IP_, ret,
66999@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
67000
67001 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
67002 if (ret) {
67003- struct page *page;
67004- page = virt_to_page(ret);
67005- page->private = size;
67006+ struct slob_page *sp;
67007+ sp = slob_page(ret);
67008+ sp->size = size;
67009 }
67010
67011 trace_kmalloc_node(_RET_IP_, ret,
67012 size, PAGE_SIZE << order, gfp, node);
67013 }
67014
67015- kmemleak_alloc(ret, size, 1, gfp);
67016+ return ret;
67017+}
67018+
67019+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67020+{
67021+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67022+ void *ret = __kmalloc_node_align(size, gfp, node, align);
67023+
67024+ if (!ZERO_OR_NULL_PTR(ret))
67025+ kmemleak_alloc(ret, size, 1, gfp);
67026 return ret;
67027 }
67028 EXPORT_SYMBOL(__kmalloc_node);
67029@@ -528,13 +542,88 @@ void kfree(const void *block)
67030 sp = slob_page(block);
67031 if (is_slob_page(sp)) {
67032 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67033- unsigned int *m = (unsigned int *)(block - align);
67034- slob_free(m, *m + align);
67035- } else
67036+ slob_t *m = (slob_t *)(block - align);
67037+ slob_free(m, m[0].units + align);
67038+ } else {
67039+ clear_slob_page(sp);
67040+ free_slob_page(sp);
67041+ sp->size = 0;
67042 put_page(&sp->page);
67043+ }
67044 }
67045 EXPORT_SYMBOL(kfree);
67046
67047+void check_object_size(const void *ptr, unsigned long n, bool to)
67048+{
67049+
67050+#ifdef CONFIG_PAX_USERCOPY
67051+ struct slob_page *sp;
67052+ const slob_t *free;
67053+ const void *base;
67054+ unsigned long flags;
67055+
67056+ if (!n)
67057+ return;
67058+
67059+ if (ZERO_OR_NULL_PTR(ptr))
67060+ goto report;
67061+
67062+ if (!virt_addr_valid(ptr))
67063+ return;
67064+
67065+ sp = slob_page(ptr);
67066+ if (!PageSlab((struct page*)sp)) {
67067+ if (object_is_on_stack(ptr, n) == -1)
67068+ goto report;
67069+ return;
67070+ }
67071+
67072+ if (sp->size) {
67073+ base = page_address(&sp->page);
67074+ if (base <= ptr && n <= sp->size - (ptr - base))
67075+ return;
67076+ goto report;
67077+ }
67078+
67079+ /* some tricky double walking to find the chunk */
67080+ spin_lock_irqsave(&slob_lock, flags);
67081+ base = (void *)((unsigned long)ptr & PAGE_MASK);
67082+ free = sp->free;
67083+
67084+ while (!slob_last(free) && (void *)free <= ptr) {
67085+ base = free + slob_units(free);
67086+ free = slob_next(free);
67087+ }
67088+
67089+ while (base < (void *)free) {
67090+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
67091+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
67092+ int offset;
67093+
67094+ if (ptr < base + align)
67095+ break;
67096+
67097+ offset = ptr - base - align;
67098+ if (offset >= m) {
67099+ base += size;
67100+ continue;
67101+ }
67102+
67103+ if (n > m - offset)
67104+ break;
67105+
67106+ spin_unlock_irqrestore(&slob_lock, flags);
67107+ return;
67108+ }
67109+
67110+ spin_unlock_irqrestore(&slob_lock, flags);
67111+report:
67112+ pax_report_usercopy(ptr, n, to, NULL);
67113+#endif
67114+
67115+}
67116+EXPORT_SYMBOL(check_object_size);
67117+
67118 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
67119 size_t ksize(const void *block)
67120 {
67121@@ -547,10 +636,10 @@ size_t ksize(const void *block)
67122 sp = slob_page(block);
67123 if (is_slob_page(sp)) {
67124 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67125- unsigned int *m = (unsigned int *)(block - align);
67126- return SLOB_UNITS(*m) * SLOB_UNIT;
67127+ slob_t *m = (slob_t *)(block - align);
67128+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
67129 } else
67130- return sp->page.private;
67131+ return sp->size;
67132 }
67133 EXPORT_SYMBOL(ksize);
67134
67135@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
67136 {
67137 struct kmem_cache *c;
67138
67139+#ifdef CONFIG_PAX_USERCOPY
67140+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
67141+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
67142+#else
67143 c = slob_alloc(sizeof(struct kmem_cache),
67144 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
67145+#endif
67146
67147 if (c) {
67148 c->name = name;
67149@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
67150 {
67151 void *b;
67152
67153+#ifdef CONFIG_PAX_USERCOPY
67154+ b = __kmalloc_node_align(c->size, flags, node, c->align);
67155+#else
67156 if (c->size < PAGE_SIZE) {
67157 b = slob_alloc(c->size, flags, c->align, node);
67158 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67159 SLOB_UNITS(c->size) * SLOB_UNIT,
67160 flags, node);
67161 } else {
67162+ struct slob_page *sp;
67163+
67164 b = slob_new_pages(flags, get_order(c->size), node);
67165+ sp = slob_page(b);
67166+ sp->size = c->size;
67167 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67168 PAGE_SIZE << get_order(c->size),
67169 flags, node);
67170 }
67171+#endif
67172
67173 if (c->ctor)
67174 c->ctor(b);
67175@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
67176
67177 static void __kmem_cache_free(void *b, int size)
67178 {
67179- if (size < PAGE_SIZE)
67180+ struct slob_page *sp = slob_page(b);
67181+
67182+ if (is_slob_page(sp))
67183 slob_free(b, size);
67184- else
67185+ else {
67186+ clear_slob_page(sp);
67187+ free_slob_page(sp);
67188+ sp->size = 0;
67189 slob_free_pages(b, get_order(size));
67190+ }
67191 }
67192
67193 static void kmem_rcu_free(struct rcu_head *head)
67194@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
67195
67196 void kmem_cache_free(struct kmem_cache *c, void *b)
67197 {
67198+ int size = c->size;
67199+
67200+#ifdef CONFIG_PAX_USERCOPY
67201+ if (size + c->align < PAGE_SIZE) {
67202+ size += c->align;
67203+ b -= c->align;
67204+ }
67205+#endif
67206+
67207 kmemleak_free_recursive(b, c->flags);
67208 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
67209 struct slob_rcu *slob_rcu;
67210- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
67211+ slob_rcu = b + (size - sizeof(struct slob_rcu));
67212 INIT_RCU_HEAD(&slob_rcu->head);
67213- slob_rcu->size = c->size;
67214+ slob_rcu->size = size;
67215 call_rcu(&slob_rcu->head, kmem_rcu_free);
67216 } else {
67217- __kmem_cache_free(b, c->size);
67218+ __kmem_cache_free(b, size);
67219 }
67220
67221+#ifdef CONFIG_PAX_USERCOPY
67222+ trace_kfree(_RET_IP_, b);
67223+#else
67224 trace_kmem_cache_free(_RET_IP_, b);
67225+#endif
67226+
67227 }
67228 EXPORT_SYMBOL(kmem_cache_free);
67229
67230diff -urNp linux-2.6.32.42/mm/slub.c linux-2.6.32.42/mm/slub.c
67231--- linux-2.6.32.42/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
67232+++ linux-2.6.32.42/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
67233@@ -410,7 +410,7 @@ static void print_track(const char *s, s
67234 if (!t->addr)
67235 return;
67236
67237- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67238+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67239 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67240 }
67241
67242@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
67243
67244 page = virt_to_head_page(x);
67245
67246+ BUG_ON(!PageSlab(page));
67247+
67248 slab_free(s, page, x, _RET_IP_);
67249
67250 trace_kmem_cache_free(_RET_IP_, x);
67251@@ -1937,7 +1939,7 @@ static int slub_min_objects;
67252 * Merge control. If this is set then no merging of slab caches will occur.
67253 * (Could be removed. This was introduced to pacify the merge skeptics.)
67254 */
67255-static int slub_nomerge;
67256+static int slub_nomerge = 1;
67257
67258 /*
67259 * Calculate the order of allocation given an slab object size.
67260@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
67261 * list to avoid pounding the page allocator excessively.
67262 */
67263 set_min_partial(s, ilog2(s->size));
67264- s->refcount = 1;
67265+ atomic_set(&s->refcount, 1);
67266 #ifdef CONFIG_NUMA
67267 s->remote_node_defrag_ratio = 1000;
67268 #endif
67269@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
67270 void kmem_cache_destroy(struct kmem_cache *s)
67271 {
67272 down_write(&slub_lock);
67273- s->refcount--;
67274- if (!s->refcount) {
67275+ if (atomic_dec_and_test(&s->refcount)) {
67276 list_del(&s->list);
67277 up_write(&slub_lock);
67278 if (kmem_cache_close(s)) {
67279@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
67280 __setup("slub_nomerge", setup_slub_nomerge);
67281
67282 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
67283- const char *name, int size, gfp_t gfp_flags)
67284+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
67285 {
67286- unsigned int flags = 0;
67287-
67288 if (gfp_flags & SLUB_DMA)
67289- flags = SLAB_CACHE_DMA;
67290+ flags |= SLAB_CACHE_DMA;
67291
67292 /*
67293 * This function is called with IRQs disabled during early-boot on
67294@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
67295 EXPORT_SYMBOL(__kmalloc_node);
67296 #endif
67297
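+/*
+ * SLUB variant of the PAX_USERCOPY check: the object size comes from
+ * page->slab, and the copy must fit inside a single object of a cache
+ * that carries SLAB_USERCOPY.
+ */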
67298+void check_object_size(const void *ptr, unsigned long n, bool to)
67299+{
67300+
67301+#ifdef CONFIG_PAX_USERCOPY
67302+ struct page *page;
67303+ struct kmem_cache *s = NULL;
67304+ unsigned long offset;
67305+
67306+ if (!n)
67307+ return;
67308+
67309+ if (ZERO_OR_NULL_PTR(ptr))
67310+ goto report;
67311+
67312+ if (!virt_addr_valid(ptr))
67313+ return;
67314+
67315+ page = get_object_page(ptr);
67316+
67317+ if (!page) {
67318+ if (object_is_on_stack(ptr, n) == -1)
67319+ goto report;
67320+ return;
67321+ }
67322+
67323+ s = page->slab;
67324+ if (!(s->flags & SLAB_USERCOPY))
67325+ goto report;
67326+
67327+ offset = (ptr - page_address(page)) % s->size;
67328+ if (offset <= s->objsize && n <= s->objsize - offset)
67329+ return;
67330+
67331+report:
67332+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67333+#endif
67334+
67335+}
67336+EXPORT_SYMBOL(check_object_size);
67337+
67338 size_t ksize(const void *object)
67339 {
67340 struct page *page;
67341@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
67342 * kmem_cache_open for slab_state == DOWN.
67343 */
67344 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
67345- sizeof(struct kmem_cache_node), GFP_NOWAIT);
67346- kmalloc_caches[0].refcount = -1;
67347+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
67348+ atomic_set(&kmalloc_caches[0].refcount, -1);
67349 caches++;
67350
67351 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
67352@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
67353 /* Caches that are not of the two-to-the-power-of size */
67354 if (KMALLOC_MIN_SIZE <= 32) {
67355 create_kmalloc_cache(&kmalloc_caches[1],
67356- "kmalloc-96", 96, GFP_NOWAIT);
67357+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
67358 caches++;
67359 }
67360 if (KMALLOC_MIN_SIZE <= 64) {
67361 create_kmalloc_cache(&kmalloc_caches[2],
67362- "kmalloc-192", 192, GFP_NOWAIT);
67363+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
67364 caches++;
67365 }
67366
67367 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67368 create_kmalloc_cache(&kmalloc_caches[i],
67369- "kmalloc", 1 << i, GFP_NOWAIT);
67370+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
67371 caches++;
67372 }
67373
67374@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
67375 /*
67376 * We may have set a slab to be unmergeable during bootstrap.
67377 */
67378- if (s->refcount < 0)
67379+ if (atomic_read(&s->refcount) < 0)
67380 return 1;
67381
67382 return 0;
67383@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
67384 if (s) {
67385 int cpu;
67386
67387- s->refcount++;
67388+ atomic_inc(&s->refcount);
67389 /*
67390 * Adjust the object sizes so that we clear
67391 * the complete object on kzalloc.
67392@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
67393
67394 if (sysfs_slab_alias(s, name)) {
67395 down_write(&slub_lock);
67396- s->refcount--;
67397+ atomic_dec(&s->refcount);
67398 up_write(&slub_lock);
67399 goto err;
67400 }
67401@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
67402
67403 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67404 {
67405- return sprintf(buf, "%d\n", s->refcount - 1);
67406+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67407 }
67408 SLAB_ATTR_RO(aliases);
67409
67410@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
67411 kfree(s);
67412 }
67413
67414-static struct sysfs_ops slab_sysfs_ops = {
67415+static const struct sysfs_ops slab_sysfs_ops = {
67416 .show = slab_attr_show,
67417 .store = slab_attr_store,
67418 };
67419@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
67420 return 0;
67421 }
67422
67423-static struct kset_uevent_ops slab_uevent_ops = {
67424+static const struct kset_uevent_ops slab_uevent_ops = {
67425 .filter = uevent_filter,
67426 };
67427
67428@@ -4785,7 +4824,13 @@ static const struct file_operations proc
67429
67430 static int __init slab_proc_init(void)
67431 {
67432- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67433+ mode_t gr_mode = S_IRUGO;
67434+
67435+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67436+ gr_mode = S_IRUSR;
67437+#endif
67438+
67439+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67440 return 0;
67441 }
67442 module_init(slab_proc_init);
67443diff -urNp linux-2.6.32.42/mm/util.c linux-2.6.32.42/mm/util.c
67444--- linux-2.6.32.42/mm/util.c 2011-03-27 14:31:47.000000000 -0400
67445+++ linux-2.6.32.42/mm/util.c 2011-04-17 15:56:46.000000000 -0400
67446@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
67447 void arch_pick_mmap_layout(struct mm_struct *mm)
67448 {
67449 mm->mmap_base = TASK_UNMAPPED_BASE;
67450+
67451+#ifdef CONFIG_PAX_RANDMMAP
67452+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67453+ mm->mmap_base += mm->delta_mmap;
67454+#endif
67455+
67456 mm->get_unmapped_area = arch_get_unmapped_area;
67457 mm->unmap_area = arch_unmap_area;
67458 }
67459diff -urNp linux-2.6.32.42/mm/vmalloc.c linux-2.6.32.42/mm/vmalloc.c
67460--- linux-2.6.32.42/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
67461+++ linux-2.6.32.42/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
67462@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67463
67464 pte = pte_offset_kernel(pmd, addr);
67465 do {
67466- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67467- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67468+
67469+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67470+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67471+ BUG_ON(!pte_exec(*pte));
67472+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67473+ continue;
67474+ }
67475+#endif
67476+
67477+ {
67478+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67479+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67480+ }
67481 } while (pte++, addr += PAGE_SIZE, addr != end);
67482 }
67483
67484@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67485 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67486 {
67487 pte_t *pte;
67488+ int ret = -ENOMEM;
67489
67490 /*
67491 * nr is a running index into the array which helps higher level
67492@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
67493 pte = pte_alloc_kernel(pmd, addr);
67494 if (!pte)
67495 return -ENOMEM;
67496+
67497+ pax_open_kernel();
67498 do {
67499 struct page *page = pages[*nr];
67500
67501- if (WARN_ON(!pte_none(*pte)))
67502- return -EBUSY;
67503- if (WARN_ON(!page))
67504- return -ENOMEM;
67505+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67506+ if (!(pgprot_val(prot) & _PAGE_NX))
67507+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
67508+ else
67509+#endif
67510+
67511+ if (WARN_ON(!pte_none(*pte))) {
67512+ ret = -EBUSY;
67513+ goto out;
67514+ }
67515+ if (WARN_ON(!page)) {
67516+ ret = -ENOMEM;
67517+ goto out;
67518+ }
67519 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67520 (*nr)++;
67521 } while (pte++, addr += PAGE_SIZE, addr != end);
67522- return 0;
67523+ ret = 0;
67524+out:
67525+ pax_close_kernel();
67526+ return ret;
67527 }
67528
67529 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67530@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
67531 * and fall back on vmalloc() if that fails. Others
67532 * just put it in the vmalloc space.
67533 */
67534-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67535+#ifdef CONFIG_MODULES
67536+#ifdef MODULES_VADDR
67537 unsigned long addr = (unsigned long)x;
67538 if (addr >= MODULES_VADDR && addr < MODULES_END)
67539 return 1;
67540 #endif
67541+
67542+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67543+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67544+ return 1;
67545+#endif
67546+
67547+#endif
67548+
67549 return is_vmalloc_addr(x);
67550 }
67551
67552@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
67553
67554 if (!pgd_none(*pgd)) {
67555 pud_t *pud = pud_offset(pgd, addr);
67556+#ifdef CONFIG_X86
67557+ if (!pud_large(*pud))
67558+#endif
67559 if (!pud_none(*pud)) {
67560 pmd_t *pmd = pmd_offset(pud, addr);
67561+#ifdef CONFIG_X86
67562+ if (!pmd_large(*pmd))
67563+#endif
67564 if (!pmd_none(*pmd)) {
67565 pte_t *ptep, pte;
67566
67567@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
67568 struct rb_node *tmp;
67569
67570 while (*p) {
67571- struct vmap_area *tmp;
67572+ struct vmap_area *varea;
67573
67574 parent = *p;
67575- tmp = rb_entry(parent, struct vmap_area, rb_node);
67576- if (va->va_start < tmp->va_end)
67577+ varea = rb_entry(parent, struct vmap_area, rb_node);
67578+ if (va->va_start < varea->va_end)
67579 p = &(*p)->rb_left;
67580- else if (va->va_end > tmp->va_start)
67581+ else if (va->va_end > varea->va_start)
67582 p = &(*p)->rb_right;
67583 else
67584 BUG();
67585@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
67586 struct vm_struct *area;
67587
67588 BUG_ON(in_interrupt());
67589+
67590+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67591+ if (flags & VM_KERNEXEC) {
67592+ if (start != VMALLOC_START || end != VMALLOC_END)
67593+ return NULL;
67594+ start = (unsigned long)MODULES_EXEC_VADDR;
67595+ end = (unsigned long)MODULES_EXEC_END;
67596+ }
67597+#endif
67598+
67599 if (flags & VM_IOREMAP) {
67600 int bit = fls(size);
67601
67602@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
67603 if (count > totalram_pages)
67604 return NULL;
67605
67606+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67607+ if (!(pgprot_val(prot) & _PAGE_NX))
67608+ flags |= VM_KERNEXEC;
67609+#endif
67610+
67611 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67612 __builtin_return_address(0));
67613 if (!area)
67614@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
67615 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67616 return NULL;
67617
67618+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67619+ if (!(pgprot_val(prot) & _PAGE_NX))
67620+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67621+ node, gfp_mask, caller);
67622+ else
67623+#endif
67624+
67625 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
67626 VMALLOC_END, node, gfp_mask, caller);
67627
67628@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
67629 return addr;
67630 }
67631
67632+#undef __vmalloc
67633 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67634 {
67635 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67636@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
67637 * For tight control over page level allocator and protection flags
67638 * use __vmalloc() instead.
67639 */
67640+#undef vmalloc
67641 void *vmalloc(unsigned long size)
67642 {
67643 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67644@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
67645 * The resulting memory area is zeroed so it can be mapped to userspace
67646 * without leaking data.
67647 */
67648+#undef vmalloc_user
67649 void *vmalloc_user(unsigned long size)
67650 {
67651 struct vm_struct *area;
67652@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
67653 * For tight control over page level allocator and protection flags
67654 * use __vmalloc() instead.
67655 */
67656+#undef vmalloc_node
67657 void *vmalloc_node(unsigned long size, int node)
67658 {
67659 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67660@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
67661 * For tight control over page level allocator and protection flags
67662 * use __vmalloc() instead.
67663 */
67664-
67665+#undef vmalloc_exec
67666 void *vmalloc_exec(unsigned long size)
67667 {
67668- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67669+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67670 -1, __builtin_return_address(0));
67671 }
67672
67673@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
67674 * Allocate enough 32bit PA addressable pages to cover @size from the
67675 * page level allocator and map them into contiguous kernel virtual space.
67676 */
67677+#undef vmalloc_32
67678 void *vmalloc_32(unsigned long size)
67679 {
67680 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67681@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
67682 * The resulting memory area is 32bit addressable and zeroed so it can be
67683 * mapped to userspace without leaking data.
67684 */
67685+#undef vmalloc_32_user
67686 void *vmalloc_32_user(unsigned long size)
67687 {
67688 struct vm_struct *area;
67689@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
67690 unsigned long uaddr = vma->vm_start;
67691 unsigned long usize = vma->vm_end - vma->vm_start;
67692
67693+ BUG_ON(vma->vm_mirror);
67694+
67695 if ((PAGE_SIZE-1) & (unsigned long)addr)
67696 return -EINVAL;
67697
67698diff -urNp linux-2.6.32.42/mm/vmstat.c linux-2.6.32.42/mm/vmstat.c
67699--- linux-2.6.32.42/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
67700+++ linux-2.6.32.42/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
67701@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
67702 *
67703 * vm_stat contains the global counters
67704 */
67705-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67706+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67707 EXPORT_SYMBOL(vm_stat);
67708
67709 #ifdef CONFIG_SMP
67710@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
67711 v = p->vm_stat_diff[i];
67712 p->vm_stat_diff[i] = 0;
67713 local_irq_restore(flags);
67714- atomic_long_add(v, &zone->vm_stat[i]);
67715+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67716 global_diff[i] += v;
67717 #ifdef CONFIG_NUMA
67718 /* 3 seconds idle till flush */
67719@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
67720
67721 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67722 if (global_diff[i])
67723- atomic_long_add(global_diff[i], &vm_stat[i]);
67724+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67725 }
67726
67727 #endif
67728@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
67729 start_cpu_timer(cpu);
67730 #endif
67731 #ifdef CONFIG_PROC_FS
67732- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67733- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67734- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67735- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67736+ {
67737+ mode_t gr_mode = S_IRUGO;
67738+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67739+ gr_mode = S_IRUSR;
67740+#endif
67741+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67742+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67743+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67744+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67745+#else
67746+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67747+#endif
67748+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67749+ }
67750 #endif
67751 return 0;
67752 }
67753diff -urNp linux-2.6.32.42/net/8021q/vlan.c linux-2.6.32.42/net/8021q/vlan.c
67754--- linux-2.6.32.42/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67755+++ linux-2.6.32.42/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67756@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67757 err = -EPERM;
67758 if (!capable(CAP_NET_ADMIN))
67759 break;
67760- if ((args.u.name_type >= 0) &&
67761- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67762+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67763 struct vlan_net *vn;
67764
67765 vn = net_generic(net, vlan_net_id);
67766diff -urNp linux-2.6.32.42/net/atm/atm_misc.c linux-2.6.32.42/net/atm/atm_misc.c
67767--- linux-2.6.32.42/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67768+++ linux-2.6.32.42/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67769@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67770 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67771 return 1;
67772 atm_return(vcc,truesize);
67773- atomic_inc(&vcc->stats->rx_drop);
67774+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67775 return 0;
67776 }
67777
67778@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67779 }
67780 }
67781 atm_return(vcc,guess);
67782- atomic_inc(&vcc->stats->rx_drop);
67783+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67784 return NULL;
67785 }
67786
67787@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67788
67789 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67790 {
67791-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67792+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67793 __SONET_ITEMS
67794 #undef __HANDLE_ITEM
67795 }
67796@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67797
67798 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67799 {
67800-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67801+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67802 __SONET_ITEMS
67803 #undef __HANDLE_ITEM
67804 }
67805diff -urNp linux-2.6.32.42/net/atm/mpoa_caches.c linux-2.6.32.42/net/atm/mpoa_caches.c
67806--- linux-2.6.32.42/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67807+++ linux-2.6.32.42/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67808@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67809 struct timeval now;
67810 struct k_message msg;
67811
67812+ pax_track_stack();
67813+
67814 do_gettimeofday(&now);
67815
67816 write_lock_irq(&client->egress_lock);
67817diff -urNp linux-2.6.32.42/net/atm/proc.c linux-2.6.32.42/net/atm/proc.c
67818--- linux-2.6.32.42/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67819+++ linux-2.6.32.42/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67820@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67821 const struct k_atm_aal_stats *stats)
67822 {
67823 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67824- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67825- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67826- atomic_read(&stats->rx_drop));
67827+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67828+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67829+ atomic_read_unchecked(&stats->rx_drop));
67830 }
67831
67832 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67833@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67834 {
67835 struct sock *sk = sk_atm(vcc);
67836
67837+#ifdef CONFIG_GRKERNSEC_HIDESYM
67838+ seq_printf(seq, "%p ", NULL);
67839+#else
67840 seq_printf(seq, "%p ", vcc);
67841+#endif
67842+
67843 if (!vcc->dev)
67844 seq_printf(seq, "Unassigned ");
67845 else
67846@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67847 {
67848 if (!vcc->dev)
67849 seq_printf(seq, sizeof(void *) == 4 ?
67850+#ifdef CONFIG_GRKERNSEC_HIDESYM
67851+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67852+#else
67853 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67854+#endif
67855 else
67856 seq_printf(seq, "%3d %3d %5d ",
67857 vcc->dev->number, vcc->vpi, vcc->vci);
67858diff -urNp linux-2.6.32.42/net/atm/resources.c linux-2.6.32.42/net/atm/resources.c
67859--- linux-2.6.32.42/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67860+++ linux-2.6.32.42/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67861@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67862 static void copy_aal_stats(struct k_atm_aal_stats *from,
67863 struct atm_aal_stats *to)
67864 {
67865-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67866+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67867 __AAL_STAT_ITEMS
67868 #undef __HANDLE_ITEM
67869 }
67870@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67871 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67872 struct atm_aal_stats *to)
67873 {
67874-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67875+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67876 __AAL_STAT_ITEMS
67877 #undef __HANDLE_ITEM
67878 }
67879diff -urNp linux-2.6.32.42/net/bluetooth/l2cap.c linux-2.6.32.42/net/bluetooth/l2cap.c
67880--- linux-2.6.32.42/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67881+++ linux-2.6.32.42/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67882@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67883 err = -ENOTCONN;
67884 break;
67885 }
67886-
67887+ memset(&cinfo, 0, sizeof(cinfo));
67888 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67889 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67890
67891@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
67892
67893 /* Reject if config buffer is too small. */
67894 len = cmd_len - sizeof(*req);
67895- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67896+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67897 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
67898 l2cap_build_conf_rsp(sk, rsp,
67899 L2CAP_CONF_REJECT, flags), rsp);
67900diff -urNp linux-2.6.32.42/net/bluetooth/rfcomm/sock.c linux-2.6.32.42/net/bluetooth/rfcomm/sock.c
67901--- linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67902+++ linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67903@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67904
67905 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67906
67907+ memset(&cinfo, 0, sizeof(cinfo));
67908 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67909 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67910
67911diff -urNp linux-2.6.32.42/net/bridge/br_private.h linux-2.6.32.42/net/bridge/br_private.h
67912--- linux-2.6.32.42/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67913+++ linux-2.6.32.42/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67914@@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67915
67916 #ifdef CONFIG_SYSFS
67917 /* br_sysfs_if.c */
67918-extern struct sysfs_ops brport_sysfs_ops;
67919+extern const struct sysfs_ops brport_sysfs_ops;
67920 extern int br_sysfs_addif(struct net_bridge_port *p);
67921
67922 /* br_sysfs_br.c */
67923diff -urNp linux-2.6.32.42/net/bridge/br_stp_if.c linux-2.6.32.42/net/bridge/br_stp_if.c
67924--- linux-2.6.32.42/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67925+++ linux-2.6.32.42/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67926@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67927 char *envp[] = { NULL };
67928
67929 if (br->stp_enabled == BR_USER_STP) {
67930- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67931+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67932 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67933 br->dev->name, r);
67934
67935diff -urNp linux-2.6.32.42/net/bridge/br_sysfs_if.c linux-2.6.32.42/net/bridge/br_sysfs_if.c
67936--- linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67937+++ linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67938@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67939 return ret;
67940 }
67941
67942-struct sysfs_ops brport_sysfs_ops = {
67943+const struct sysfs_ops brport_sysfs_ops = {
67944 .show = brport_show,
67945 .store = brport_store,
67946 };
67947diff -urNp linux-2.6.32.42/net/bridge/netfilter/ebtables.c linux-2.6.32.42/net/bridge/netfilter/ebtables.c
67948--- linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67949+++ linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67950@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67951 unsigned int entries_size, nentries;
67952 char *entries;
67953
67954+ pax_track_stack();
67955+
67956 if (cmd == EBT_SO_GET_ENTRIES) {
67957 entries_size = t->private->entries_size;
67958 nentries = t->private->nentries;
67959diff -urNp linux-2.6.32.42/net/can/bcm.c linux-2.6.32.42/net/can/bcm.c
67960--- linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67961+++ linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67962@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67963 struct bcm_sock *bo = bcm_sk(sk);
67964 struct bcm_op *op;
67965
67966+#ifdef CONFIG_GRKERNSEC_HIDESYM
67967+ seq_printf(m, ">>> socket %p", NULL);
67968+ seq_printf(m, " / sk %p", NULL);
67969+ seq_printf(m, " / bo %p", NULL);
67970+#else
67971 seq_printf(m, ">>> socket %p", sk->sk_socket);
67972 seq_printf(m, " / sk %p", sk);
67973 seq_printf(m, " / bo %p", bo);
67974+#endif
67975 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67976 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67977 seq_printf(m, " <<<\n");
67978diff -urNp linux-2.6.32.42/net/core/dev.c linux-2.6.32.42/net/core/dev.c
67979--- linux-2.6.32.42/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67980+++ linux-2.6.32.42/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67981@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67982 if (no_module && capable(CAP_NET_ADMIN))
67983 no_module = request_module("netdev-%s", name);
67984 if (no_module && capable(CAP_SYS_MODULE)) {
67985+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67986+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
67987+#else
67988 if (!request_module("%s", name))
67989 pr_err("Loading kernel module for a network device "
67990 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67991 "instead\n", name);
67992+#endif
67993 }
67994 }
67995 EXPORT_SYMBOL(dev_load);
67996@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67997 }
67998 EXPORT_SYMBOL(netif_rx_ni);
67999
68000-static void net_tx_action(struct softirq_action *h)
68001+static void net_tx_action(void)
68002 {
68003 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68004
68005@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
68006 EXPORT_SYMBOL(netif_napi_del);
68007
68008
68009-static void net_rx_action(struct softirq_action *h)
68010+static void net_rx_action(void)
68011 {
68012 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
68013 unsigned long time_limit = jiffies + 2;
68014diff -urNp linux-2.6.32.42/net/core/flow.c linux-2.6.32.42/net/core/flow.c
68015--- linux-2.6.32.42/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
68016+++ linux-2.6.32.42/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
68017@@ -35,11 +35,11 @@ struct flow_cache_entry {
68018 atomic_t *object_ref;
68019 };
68020
68021-atomic_t flow_cache_genid = ATOMIC_INIT(0);
68022+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68023
68024 static u32 flow_hash_shift;
68025 #define flow_hash_size (1 << flow_hash_shift)
68026-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
68027+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
68028
68029 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
68030
68031@@ -52,7 +52,7 @@ struct flow_percpu_info {
68032 u32 hash_rnd;
68033 int count;
68034 };
68035-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
68036+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
68037
68038 #define flow_hash_rnd_recalc(cpu) \
68039 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
68040@@ -69,7 +69,7 @@ struct flow_flush_info {
68041 atomic_t cpuleft;
68042 struct completion completion;
68043 };
68044-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
68045+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
68046
68047 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
68048
68049@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
68050 if (fle->family == family &&
68051 fle->dir == dir &&
68052 flow_key_compare(key, &fle->key) == 0) {
68053- if (fle->genid == atomic_read(&flow_cache_genid)) {
68054+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
68055 void *ret = fle->object;
68056
68057 if (ret)
68058@@ -228,7 +228,7 @@ nocache:
68059 err = resolver(net, key, family, dir, &obj, &obj_ref);
68060
68061 if (fle && !err) {
68062- fle->genid = atomic_read(&flow_cache_genid);
68063+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
68064
68065 if (fle->object)
68066 atomic_dec(fle->object_ref);
68067@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
68068
68069 fle = flow_table(cpu)[i];
68070 for (; fle; fle = fle->next) {
68071- unsigned genid = atomic_read(&flow_cache_genid);
68072+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
68073
68074 if (!fle->object || fle->genid == genid)
68075 continue;
68076diff -urNp linux-2.6.32.42/net/core/skbuff.c linux-2.6.32.42/net/core/skbuff.c
68077--- linux-2.6.32.42/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
68078+++ linux-2.6.32.42/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
68079@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
68080 struct sk_buff *frag_iter;
68081 struct sock *sk = skb->sk;
68082
68083+ pax_track_stack();
68084+
68085 /*
68086 * __skb_splice_bits() only fails if the output has no room left,
68087 * so no point in going over the frag_list for the error case.
68088diff -urNp linux-2.6.32.42/net/core/sock.c linux-2.6.32.42/net/core/sock.c
68089--- linux-2.6.32.42/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
68090+++ linux-2.6.32.42/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
68091@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
68092 break;
68093
68094 case SO_PEERCRED:
68095+ {
68096+ struct ucred peercred;
68097 if (len > sizeof(sk->sk_peercred))
68098 len = sizeof(sk->sk_peercred);
68099- if (copy_to_user(optval, &sk->sk_peercred, len))
68100+ peercred = sk->sk_peercred;
68101+ if (copy_to_user(optval, &peercred, len))
68102 return -EFAULT;
68103 goto lenout;
68104+ }
68105
68106 case SO_PEERNAME:
68107 {
68108@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
68109 */
68110 smp_wmb();
68111 atomic_set(&sk->sk_refcnt, 1);
68112- atomic_set(&sk->sk_drops, 0);
68113+ atomic_set_unchecked(&sk->sk_drops, 0);
68114 }
68115 EXPORT_SYMBOL(sock_init_data);
68116
68117diff -urNp linux-2.6.32.42/net/decnet/sysctl_net_decnet.c linux-2.6.32.42/net/decnet/sysctl_net_decnet.c
68118--- linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
68119+++ linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
68120@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
68121
68122 if (len > *lenp) len = *lenp;
68123
68124- if (copy_to_user(buffer, addr, len))
68125+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
68126 return -EFAULT;
68127
68128 *lenp = len;
68129@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
68130
68131 if (len > *lenp) len = *lenp;
68132
68133- if (copy_to_user(buffer, devname, len))
68134+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
68135 return -EFAULT;
68136
68137 *lenp = len;
68138diff -urNp linux-2.6.32.42/net/econet/Kconfig linux-2.6.32.42/net/econet/Kconfig
68139--- linux-2.6.32.42/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
68140+++ linux-2.6.32.42/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
68141@@ -4,7 +4,7 @@
68142
68143 config ECONET
68144 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68145- depends on EXPERIMENTAL && INET
68146+ depends on EXPERIMENTAL && INET && BROKEN
68147 ---help---
68148 Econet is a fairly old and slow networking protocol mainly used by
68149 Acorn computers to access file and print servers. It uses native
68150diff -urNp linux-2.6.32.42/net/ieee802154/dgram.c linux-2.6.32.42/net/ieee802154/dgram.c
68151--- linux-2.6.32.42/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
68152+++ linux-2.6.32.42/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
68153@@ -318,7 +318,7 @@ out:
68154 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
68155 {
68156 if (sock_queue_rcv_skb(sk, skb) < 0) {
68157- atomic_inc(&sk->sk_drops);
68158+ atomic_inc_unchecked(&sk->sk_drops);
68159 kfree_skb(skb);
68160 return NET_RX_DROP;
68161 }
68162diff -urNp linux-2.6.32.42/net/ieee802154/raw.c linux-2.6.32.42/net/ieee802154/raw.c
68163--- linux-2.6.32.42/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
68164+++ linux-2.6.32.42/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
68165@@ -206,7 +206,7 @@ out:
68166 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
68167 {
68168 if (sock_queue_rcv_skb(sk, skb) < 0) {
68169- atomic_inc(&sk->sk_drops);
68170+ atomic_inc_unchecked(&sk->sk_drops);
68171 kfree_skb(skb);
68172 return NET_RX_DROP;
68173 }
68174diff -urNp linux-2.6.32.42/net/ipv4/inet_diag.c linux-2.6.32.42/net/ipv4/inet_diag.c
68175--- linux-2.6.32.42/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
68176+++ linux-2.6.32.42/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
68177@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
68178 r->idiag_retrans = 0;
68179
68180 r->id.idiag_if = sk->sk_bound_dev_if;
68181+#ifdef CONFIG_GRKERNSEC_HIDESYM
68182+ r->id.idiag_cookie[0] = 0;
68183+ r->id.idiag_cookie[1] = 0;
68184+#else
68185 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68186 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68187+#endif
68188
68189 r->id.idiag_sport = inet->sport;
68190 r->id.idiag_dport = inet->dport;
68191@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
68192 r->idiag_family = tw->tw_family;
68193 r->idiag_retrans = 0;
68194 r->id.idiag_if = tw->tw_bound_dev_if;
68195+
68196+#ifdef CONFIG_GRKERNSEC_HIDESYM
68197+ r->id.idiag_cookie[0] = 0;
68198+ r->id.idiag_cookie[1] = 0;
68199+#else
68200 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68201 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68202+#endif
68203+
68204 r->id.idiag_sport = tw->tw_sport;
68205 r->id.idiag_dport = tw->tw_dport;
68206 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68207@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
68208 if (sk == NULL)
68209 goto unlock;
68210
68211+#ifndef CONFIG_GRKERNSEC_HIDESYM
68212 err = -ESTALE;
68213 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68214 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68215 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68216 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68217 goto out;
68218+#endif
68219
68220 err = -ENOMEM;
68221 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68222@@ -436,7 +450,7 @@ static int valid_cc(const void *bc, int
68223 return 0;
68224 if (cc == len)
68225 return 1;
68226- if (op->yes < 4)
68227+ if (op->yes < 4 || op->yes & 3)
68228 return 0;
68229 len -= op->yes;
68230 bc += op->yes;
68231@@ -446,11 +460,11 @@ static int valid_cc(const void *bc, int
68232
68233 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
68234 {
68235- const unsigned char *bc = bytecode;
68236+ const void *bc = bytecode;
68237 int len = bytecode_len;
68238
68239 while (len > 0) {
68240- struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
68241+ const struct inet_diag_bc_op *op = bc;
68242
68243 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
68244 switch (op->code) {
68245@@ -461,22 +475,20 @@ static int inet_diag_bc_audit(const void
68246 case INET_DIAG_BC_S_LE:
68247 case INET_DIAG_BC_D_GE:
68248 case INET_DIAG_BC_D_LE:
68249- if (op->yes < 4 || op->yes > len + 4)
68250- return -EINVAL;
68251 case INET_DIAG_BC_JMP:
68252- if (op->no < 4 || op->no > len + 4)
68253+ if (op->no < 4 || op->no > len + 4 || op->no & 3)
68254 return -EINVAL;
68255 if (op->no < len &&
68256 !valid_cc(bytecode, bytecode_len, len - op->no))
68257 return -EINVAL;
68258 break;
68259 case INET_DIAG_BC_NOP:
68260- if (op->yes < 4 || op->yes > len + 4)
68261- return -EINVAL;
68262 break;
68263 default:
68264 return -EINVAL;
68265 }
68266+ if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
68267+ return -EINVAL;
68268 bc += op->yes;
68269 len -= op->yes;
68270 }
68271@@ -581,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
68272 r->idiag_retrans = req->retrans;
68273
68274 r->id.idiag_if = sk->sk_bound_dev_if;
68275+
68276+#ifdef CONFIG_GRKERNSEC_HIDESYM
68277+ r->id.idiag_cookie[0] = 0;
68278+ r->id.idiag_cookie[1] = 0;
68279+#else
68280 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68281 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68282+#endif
68283
68284 tmo = req->expires - jiffies;
68285 if (tmo < 0)
68286diff -urNp linux-2.6.32.42/net/ipv4/inet_hashtables.c linux-2.6.32.42/net/ipv4/inet_hashtables.c
68287--- linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68288+++ linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
68289@@ -18,11 +18,14 @@
68290 #include <linux/sched.h>
68291 #include <linux/slab.h>
68292 #include <linux/wait.h>
68293+#include <linux/security.h>
68294
68295 #include <net/inet_connection_sock.h>
68296 #include <net/inet_hashtables.h>
68297 #include <net/ip.h>
68298
68299+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68300+
68301 /*
68302 * Allocate and initialize a new local port bind bucket.
68303 * The bindhash mutex for snum's hash chain must be held here.
68304@@ -490,6 +493,8 @@ ok:
68305 }
68306 spin_unlock(&head->lock);
68307
68308+ gr_update_task_in_ip_table(current, inet_sk(sk));
68309+
68310 if (tw) {
68311 inet_twsk_deschedule(tw, death_row);
68312 inet_twsk_put(tw);
68313diff -urNp linux-2.6.32.42/net/ipv4/inetpeer.c linux-2.6.32.42/net/ipv4/inetpeer.c
68314--- linux-2.6.32.42/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
68315+++ linux-2.6.32.42/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
68316@@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
68317 struct inet_peer *p, *n;
68318 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
68319
68320+ pax_track_stack();
68321+
68322 /* Look up for the address quickly. */
68323 read_lock_bh(&peer_pool_lock);
68324 p = lookup(daddr, NULL);
68325@@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
68326 return NULL;
68327 n->v4daddr = daddr;
68328 atomic_set(&n->refcnt, 1);
68329- atomic_set(&n->rid, 0);
68330+ atomic_set_unchecked(&n->rid, 0);
68331 n->ip_id_count = secure_ip_id(daddr);
68332 n->tcp_ts_stamp = 0;
68333
68334diff -urNp linux-2.6.32.42/net/ipv4/ip_fragment.c linux-2.6.32.42/net/ipv4/ip_fragment.c
68335--- linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
68336+++ linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
68337@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
68338 return 0;
68339
68340 start = qp->rid;
68341- end = atomic_inc_return(&peer->rid);
68342+ end = atomic_inc_return_unchecked(&peer->rid);
68343 qp->rid = end;
68344
68345 rc = qp->q.fragments && (end - start) > max;
68346diff -urNp linux-2.6.32.42/net/ipv4/ip_sockglue.c linux-2.6.32.42/net/ipv4/ip_sockglue.c
68347--- linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68348+++ linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68349@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
68350 int val;
68351 int len;
68352
68353+ pax_track_stack();
68354+
68355 if (level != SOL_IP)
68356 return -EOPNOTSUPP;
68357
68358diff -urNp linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c
68359--- linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
68360+++ linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
68361@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
68362 private = &tmp;
68363 }
68364 #endif
68365+ memset(&info, 0, sizeof(info));
68366 info.valid_hooks = t->valid_hooks;
68367 memcpy(info.hook_entry, private->hook_entry,
68368 sizeof(info.hook_entry));
68369diff -urNp linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c
68370--- linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
68371+++ linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
68372@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
68373 private = &tmp;
68374 }
68375 #endif
68376+ memset(&info, 0, sizeof(info));
68377 info.valid_hooks = t->valid_hooks;
68378 memcpy(info.hook_entry, private->hook_entry,
68379 sizeof(info.hook_entry));
68380diff -urNp linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c
68381--- linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
68382+++ linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
68383@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
68384
68385 *len = 0;
68386
68387- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68388+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68389 if (*octets == NULL) {
68390 if (net_ratelimit())
68391 printk("OOM in bsalg (%d)\n", __LINE__);
68392diff -urNp linux-2.6.32.42/net/ipv4/raw.c linux-2.6.32.42/net/ipv4/raw.c
68393--- linux-2.6.32.42/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
68394+++ linux-2.6.32.42/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
68395@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
68396 /* Charge it to the socket. */
68397
68398 if (sock_queue_rcv_skb(sk, skb) < 0) {
68399- atomic_inc(&sk->sk_drops);
68400+ atomic_inc_unchecked(&sk->sk_drops);
68401 kfree_skb(skb);
68402 return NET_RX_DROP;
68403 }
68404@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
68405 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68406 {
68407 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68408- atomic_inc(&sk->sk_drops);
68409+ atomic_inc_unchecked(&sk->sk_drops);
68410 kfree_skb(skb);
68411 return NET_RX_DROP;
68412 }
68413@@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
68414
68415 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68416 {
68417+ struct icmp_filter filter;
68418+
68419+ if (optlen < 0)
68420+ return -EINVAL;
68421 if (optlen > sizeof(struct icmp_filter))
68422 optlen = sizeof(struct icmp_filter);
68423- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68424+ if (copy_from_user(&filter, optval, optlen))
68425 return -EFAULT;
68426+ memcpy(&raw_sk(sk)->filter, &filter, optlen);
68427+
68428 return 0;
68429 }
68430
68431 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68432 {
68433+ struct icmp_filter filter;
68434 int len, ret = -EFAULT;
68435
68436 if (get_user(len, optlen))
68437@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
68438 if (len > sizeof(struct icmp_filter))
68439 len = sizeof(struct icmp_filter);
68440 ret = -EFAULT;
68441+ memcpy(&filter, &raw_sk(sk)->filter, len);
68442 if (put_user(len, optlen) ||
68443- copy_to_user(optval, &raw_sk(sk)->filter, len))
68444+ copy_to_user(optval, &filter, len))
68445 goto out;
68446 ret = 0;
68447 out: return ret;
68448@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
68449 sk_wmem_alloc_get(sp),
68450 sk_rmem_alloc_get(sp),
68451 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68452- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68453+ atomic_read(&sp->sk_refcnt),
68454+#ifdef CONFIG_GRKERNSEC_HIDESYM
68455+ NULL,
68456+#else
68457+ sp,
68458+#endif
68459+ atomic_read_unchecked(&sp->sk_drops));
68460 }
68461
68462 static int raw_seq_show(struct seq_file *seq, void *v)
68463diff -urNp linux-2.6.32.42/net/ipv4/route.c linux-2.6.32.42/net/ipv4/route.c
68464--- linux-2.6.32.42/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
68465+++ linux-2.6.32.42/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
68466@@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
68467
68468 static inline int rt_genid(struct net *net)
68469 {
68470- return atomic_read(&net->ipv4.rt_genid);
68471+ return atomic_read_unchecked(&net->ipv4.rt_genid);
68472 }
68473
68474 #ifdef CONFIG_PROC_FS
68475@@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
68476 unsigned char shuffle;
68477
68478 get_random_bytes(&shuffle, sizeof(shuffle));
68479- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68480+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68481 }
68482
68483 /*
68484@@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
68485
68486 static __net_init int rt_secret_timer_init(struct net *net)
68487 {
68488- atomic_set(&net->ipv4.rt_genid,
68489+ atomic_set_unchecked(&net->ipv4.rt_genid,
68490 (int) ((num_physpages ^ (num_physpages>>8)) ^
68491 (jiffies ^ (jiffies >> 7))));
68492
68493diff -urNp linux-2.6.32.42/net/ipv4/tcp.c linux-2.6.32.42/net/ipv4/tcp.c
68494--- linux-2.6.32.42/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
68495+++ linux-2.6.32.42/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
68496@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
68497 int val;
68498 int err = 0;
68499
68500+ pax_track_stack();
68501+
68502 /* This is a string value all the others are int's */
68503 if (optname == TCP_CONGESTION) {
68504 char name[TCP_CA_NAME_MAX];
68505@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
68506 struct tcp_sock *tp = tcp_sk(sk);
68507 int val, len;
68508
68509+ pax_track_stack();
68510+
68511 if (get_user(len, optlen))
68512 return -EFAULT;
68513
68514diff -urNp linux-2.6.32.42/net/ipv4/tcp_ipv4.c linux-2.6.32.42/net/ipv4/tcp_ipv4.c
68515--- linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
68516+++ linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
68517@@ -84,6 +84,9 @@
68518 int sysctl_tcp_tw_reuse __read_mostly;
68519 int sysctl_tcp_low_latency __read_mostly;
68520
68521+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68522+extern int grsec_enable_blackhole;
68523+#endif
68524
68525 #ifdef CONFIG_TCP_MD5SIG
68526 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68527@@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68528 return 0;
68529
68530 reset:
68531+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68532+ if (!grsec_enable_blackhole)
68533+#endif
68534 tcp_v4_send_reset(rsk, skb);
68535 discard:
68536 kfree_skb(skb);
68537@@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
68538 TCP_SKB_CB(skb)->sacked = 0;
68539
68540 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68541- if (!sk)
68542+ if (!sk) {
68543+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68544+ ret = 1;
68545+#endif
68546 goto no_tcp_socket;
68547+ }
68548
68549 process:
68550- if (sk->sk_state == TCP_TIME_WAIT)
68551+ if (sk->sk_state == TCP_TIME_WAIT) {
68552+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68553+ ret = 2;
68554+#endif
68555 goto do_time_wait;
68556+ }
68557
68558 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
68559 goto discard_and_relse;
68560@@ -1650,6 +1664,10 @@ no_tcp_socket:
68561 bad_packet:
68562 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68563 } else {
68564+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68565+ if (!grsec_enable_blackhole || (ret == 1 &&
68566+ (skb->dev->flags & IFF_LOOPBACK)))
68567+#endif
68568 tcp_v4_send_reset(NULL, skb);
68569 }
68570
68571@@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
68572 0, /* non standard timer */
68573 0, /* open_requests have no inode */
68574 atomic_read(&sk->sk_refcnt),
68575+#ifdef CONFIG_GRKERNSEC_HIDESYM
68576+ NULL,
68577+#else
68578 req,
68579+#endif
68580 len);
68581 }
68582
68583@@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
68584 sock_i_uid(sk),
68585 icsk->icsk_probes_out,
68586 sock_i_ino(sk),
68587- atomic_read(&sk->sk_refcnt), sk,
68588+ atomic_read(&sk->sk_refcnt),
68589+#ifdef CONFIG_GRKERNSEC_HIDESYM
68590+ NULL,
68591+#else
68592+ sk,
68593+#endif
68594 jiffies_to_clock_t(icsk->icsk_rto),
68595 jiffies_to_clock_t(icsk->icsk_ack.ato),
68596 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68597@@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
68598 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
68599 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68600 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68601- atomic_read(&tw->tw_refcnt), tw, len);
68602+ atomic_read(&tw->tw_refcnt),
68603+#ifdef CONFIG_GRKERNSEC_HIDESYM
68604+ NULL,
68605+#else
68606+ tw,
68607+#endif
68608+ len);
68609 }
68610
68611 #define TMPSZ 150
68612diff -urNp linux-2.6.32.42/net/ipv4/tcp_minisocks.c linux-2.6.32.42/net/ipv4/tcp_minisocks.c
68613--- linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
68614+++ linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
68615@@ -26,6 +26,10 @@
68616 #include <net/inet_common.h>
68617 #include <net/xfrm.h>
68618
68619+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68620+extern int grsec_enable_blackhole;
68621+#endif
68622+
68623 #ifdef CONFIG_SYSCTL
68624 #define SYNC_INIT 0 /* let the user enable it */
68625 #else
68626@@ -672,6 +676,10 @@ listen_overflow:
68627
68628 embryonic_reset:
68629 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68630+
68631+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68632+ if (!grsec_enable_blackhole)
68633+#endif
68634 if (!(flg & TCP_FLAG_RST))
68635 req->rsk_ops->send_reset(sk, skb);
68636
68637diff -urNp linux-2.6.32.42/net/ipv4/tcp_output.c linux-2.6.32.42/net/ipv4/tcp_output.c
68638--- linux-2.6.32.42/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
68639+++ linux-2.6.32.42/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
68640@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
68641 __u8 *md5_hash_location;
68642 int mss;
68643
68644+ pax_track_stack();
68645+
68646 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
68647 if (skb == NULL)
68648 return NULL;
68649diff -urNp linux-2.6.32.42/net/ipv4/tcp_probe.c linux-2.6.32.42/net/ipv4/tcp_probe.c
68650--- linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
68651+++ linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
68652@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
68653 if (cnt + width >= len)
68654 break;
68655
68656- if (copy_to_user(buf + cnt, tbuf, width))
68657+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68658 return -EFAULT;
68659 cnt += width;
68660 }
68661diff -urNp linux-2.6.32.42/net/ipv4/tcp_timer.c linux-2.6.32.42/net/ipv4/tcp_timer.c
68662--- linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
68663+++ linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
68664@@ -21,6 +21,10 @@
68665 #include <linux/module.h>
68666 #include <net/tcp.h>
68667
68668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68669+extern int grsec_lastack_retries;
68670+#endif
68671+
68672 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68673 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68674 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68675@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
68676 }
68677 }
68678
68679+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68680+ if ((sk->sk_state == TCP_LAST_ACK) &&
68681+ (grsec_lastack_retries > 0) &&
68682+ (grsec_lastack_retries < retry_until))
68683+ retry_until = grsec_lastack_retries;
68684+#endif
68685+
68686 if (retransmits_timed_out(sk, retry_until)) {
68687 /* Has it gone just too far? */
68688 tcp_write_err(sk);
68689diff -urNp linux-2.6.32.42/net/ipv4/udp.c linux-2.6.32.42/net/ipv4/udp.c
68690--- linux-2.6.32.42/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
68691+++ linux-2.6.32.42/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
68692@@ -86,6 +86,7 @@
68693 #include <linux/types.h>
68694 #include <linux/fcntl.h>
68695 #include <linux/module.h>
68696+#include <linux/security.h>
68697 #include <linux/socket.h>
68698 #include <linux/sockios.h>
68699 #include <linux/igmp.h>
68700@@ -106,6 +107,10 @@
68701 #include <net/xfrm.h>
68702 #include "udp_impl.h"
68703
68704+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68705+extern int grsec_enable_blackhole;
68706+#endif
68707+
68708 struct udp_table udp_table;
68709 EXPORT_SYMBOL(udp_table);
68710
68711@@ -371,6 +376,9 @@ found:
68712 return s;
68713 }
68714
68715+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68716+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68717+
68718 /*
68719 * This routine is called by the ICMP module when it gets some
68720 * sort of error condition. If err < 0 then the socket should
68721@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68722 dport = usin->sin_port;
68723 if (dport == 0)
68724 return -EINVAL;
68725+
68726+ err = gr_search_udp_sendmsg(sk, usin);
68727+ if (err)
68728+ return err;
68729 } else {
68730 if (sk->sk_state != TCP_ESTABLISHED)
68731 return -EDESTADDRREQ;
68732+
68733+ err = gr_search_udp_sendmsg(sk, NULL);
68734+ if (err)
68735+ return err;
68736+
68737 daddr = inet->daddr;
68738 dport = inet->dport;
68739 /* Open fast path for connected socket.
68740@@ -945,6 +962,10 @@ try_again:
68741 if (!skb)
68742 goto out;
68743
68744+ err = gr_search_udp_recvmsg(sk, skb);
68745+ if (err)
68746+ goto out_free;
68747+
68748 ulen = skb->len - sizeof(struct udphdr);
68749 copied = len;
68750 if (copied > ulen)
68751@@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
68752 if (rc == -ENOMEM) {
68753 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68754 is_udplite);
68755- atomic_inc(&sk->sk_drops);
68756+ atomic_inc_unchecked(&sk->sk_drops);
68757 }
68758 goto drop;
68759 }
68760@@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68761 goto csum_error;
68762
68763 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68764+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68765+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68766+#endif
68767 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68768
68769 /*
68770@@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
68771 sk_wmem_alloc_get(sp),
68772 sk_rmem_alloc_get(sp),
68773 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68774- atomic_read(&sp->sk_refcnt), sp,
68775- atomic_read(&sp->sk_drops), len);
68776+ atomic_read(&sp->sk_refcnt),
68777+#ifdef CONFIG_GRKERNSEC_HIDESYM
68778+ NULL,
68779+#else
68780+ sp,
68781+#endif
68782+ atomic_read_unchecked(&sp->sk_drops), len);
68783 }
68784
68785 int udp4_seq_show(struct seq_file *seq, void *v)
68786diff -urNp linux-2.6.32.42/net/ipv6/inet6_connection_sock.c linux-2.6.32.42/net/ipv6/inet6_connection_sock.c
68787--- linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68788+++ linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68789@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68790 #ifdef CONFIG_XFRM
68791 {
68792 struct rt6_info *rt = (struct rt6_info *)dst;
68793- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68794+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68795 }
68796 #endif
68797 }
68798@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68799 #ifdef CONFIG_XFRM
68800 if (dst) {
68801 struct rt6_info *rt = (struct rt6_info *)dst;
68802- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68803+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68804 sk->sk_dst_cache = NULL;
68805 dst_release(dst);
68806 dst = NULL;
68807diff -urNp linux-2.6.32.42/net/ipv6/inet6_hashtables.c linux-2.6.32.42/net/ipv6/inet6_hashtables.c
68808--- linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68809+++ linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68810@@ -118,7 +118,7 @@ out:
68811 }
68812 EXPORT_SYMBOL(__inet6_lookup_established);
68813
68814-static int inline compute_score(struct sock *sk, struct net *net,
68815+static inline int compute_score(struct sock *sk, struct net *net,
68816 const unsigned short hnum,
68817 const struct in6_addr *daddr,
68818 const int dif)
68819diff -urNp linux-2.6.32.42/net/ipv6/ipv6_sockglue.c linux-2.6.32.42/net/ipv6/ipv6_sockglue.c
68820--- linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68821+++ linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68822@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68823 int val, valbool;
68824 int retv = -ENOPROTOOPT;
68825
68826+ pax_track_stack();
68827+
68828 if (optval == NULL)
68829 val=0;
68830 else {
68831@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68832 int len;
68833 int val;
68834
68835+ pax_track_stack();
68836+
68837 if (ip6_mroute_opt(optname))
68838 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68839
68840diff -urNp linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c
68841--- linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68842+++ linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68843@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68844 private = &tmp;
68845 }
68846 #endif
68847+ memset(&info, 0, sizeof(info));
68848 info.valid_hooks = t->valid_hooks;
68849 memcpy(info.hook_entry, private->hook_entry,
68850 sizeof(info.hook_entry));
68851diff -urNp linux-2.6.32.42/net/ipv6/raw.c linux-2.6.32.42/net/ipv6/raw.c
68852--- linux-2.6.32.42/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68853+++ linux-2.6.32.42/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68854@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68855 {
68856 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68857 skb_checksum_complete(skb)) {
68858- atomic_inc(&sk->sk_drops);
68859+ atomic_inc_unchecked(&sk->sk_drops);
68860 kfree_skb(skb);
68861 return NET_RX_DROP;
68862 }
68863
68864 /* Charge it to the socket. */
68865 if (sock_queue_rcv_skb(sk,skb)<0) {
68866- atomic_inc(&sk->sk_drops);
68867+ atomic_inc_unchecked(&sk->sk_drops);
68868 kfree_skb(skb);
68869 return NET_RX_DROP;
68870 }
68871@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68872 struct raw6_sock *rp = raw6_sk(sk);
68873
68874 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68875- atomic_inc(&sk->sk_drops);
68876+ atomic_inc_unchecked(&sk->sk_drops);
68877 kfree_skb(skb);
68878 return NET_RX_DROP;
68879 }
68880@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68881
68882 if (inet->hdrincl) {
68883 if (skb_checksum_complete(skb)) {
68884- atomic_inc(&sk->sk_drops);
68885+ atomic_inc_unchecked(&sk->sk_drops);
68886 kfree_skb(skb);
68887 return NET_RX_DROP;
68888 }
68889@@ -518,7 +518,7 @@ csum_copy_err:
68890 as some normal condition.
68891 */
68892 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68893- atomic_inc(&sk->sk_drops);
68894+ atomic_inc_unchecked(&sk->sk_drops);
68895 goto out;
68896 }
68897
68898@@ -600,7 +600,7 @@ out:
68899 return err;
68900 }
68901
68902-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68903+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68904 struct flowi *fl, struct rt6_info *rt,
68905 unsigned int flags)
68906 {
68907@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68908 u16 proto;
68909 int err;
68910
68911+ pax_track_stack();
68912+
68913 /* Rough check on arithmetic overflow,
68914 better check is made in ip6_append_data().
68915 */
68916@@ -916,12 +918,17 @@ do_confirm:
68917 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68918 char __user *optval, int optlen)
68919 {
68920+ struct icmp6_filter filter;
68921+
68922 switch (optname) {
68923 case ICMPV6_FILTER:
68924+ if (optlen < 0)
68925+ return -EINVAL;
68926 if (optlen > sizeof(struct icmp6_filter))
68927 optlen = sizeof(struct icmp6_filter);
68928- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68929+ if (copy_from_user(&filter, optval, optlen))
68930 return -EFAULT;
68931+ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68932 return 0;
68933 default:
68934 return -ENOPROTOOPT;
68935@@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68936 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68937 char __user *optval, int __user *optlen)
68938 {
68939+ struct icmp6_filter filter;
68940 int len;
68941
68942 switch (optname) {
68943@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68944 len = sizeof(struct icmp6_filter);
68945 if (put_user(len, optlen))
68946 return -EFAULT;
68947- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68948+ memcpy(&filter, &raw6_sk(sk)->filter, len);
68949+ if (copy_to_user(optval, &filter, len))
68950 return -EFAULT;
68951 return 0;
68952 default:
68953@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68954 0, 0L, 0,
68955 sock_i_uid(sp), 0,
68956 sock_i_ino(sp),
68957- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68958+ atomic_read(&sp->sk_refcnt),
68959+#ifdef CONFIG_GRKERNSEC_HIDESYM
68960+ NULL,
68961+#else
68962+ sp,
68963+#endif
68964+ atomic_read_unchecked(&sp->sk_drops));
68965 }
68966
68967 static int raw6_seq_show(struct seq_file *seq, void *v)
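
The rawv6_seticmpfilter()/rawv6_geticmpfilter() hunks above stop copy_from_user()/copy_to_user() from operating directly on raw6_sk(sk)->filter: a negative optlen is rejected, the length is clamped, and the data is staged through an on-stack struct icmp6_filter before the socket's copy is updated. A kernel-style sketch of that staging pattern follows; demo_filter and demo_set_filter are illustrative names, not code from this patch.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_filter {
	u32 data[8];			/* like icmp6_filter's 256-bit bitmap */
};

/* clamp the user-supplied length, stage the copy on the stack, and only
 * then update the long-lived kernel object */
static int demo_set_filter(struct demo_filter *dst,
			   const char __user *optval, int optlen)
{
	struct demo_filter tmp;

	if (optlen < 0)
		return -EINVAL;
	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;
	memcpy(dst, &tmp, optlen);
	return 0;
}
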
68968diff -urNp linux-2.6.32.42/net/ipv6/tcp_ipv6.c linux-2.6.32.42/net/ipv6/tcp_ipv6.c
68969--- linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68970+++ linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68971@@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68972 }
68973 #endif
68974
68975+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68976+extern int grsec_enable_blackhole;
68977+#endif
68978+
68979 static void tcp_v6_hash(struct sock *sk)
68980 {
68981 if (sk->sk_state != TCP_CLOSE) {
68982@@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68983 return 0;
68984
68985 reset:
68986+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68987+ if (!grsec_enable_blackhole)
68988+#endif
68989 tcp_v6_send_reset(sk, skb);
68990 discard:
68991 if (opt_skb)
68992@@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68993 TCP_SKB_CB(skb)->sacked = 0;
68994
68995 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68996- if (!sk)
68997+ if (!sk) {
68998+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68999+ ret = 1;
69000+#endif
69001 goto no_tcp_socket;
69002+ }
69003
69004 process:
69005- if (sk->sk_state == TCP_TIME_WAIT)
69006+ if (sk->sk_state == TCP_TIME_WAIT) {
69007+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69008+ ret = 2;
69009+#endif
69010 goto do_time_wait;
69011+ }
69012
69013 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
69014 goto discard_and_relse;
69015@@ -1700,6 +1715,10 @@ no_tcp_socket:
69016 bad_packet:
69017 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69018 } else {
69019+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69020+ if (!grsec_enable_blackhole || (ret == 1 &&
69021+ (skb->dev->flags & IFF_LOOPBACK)))
69022+#endif
69023 tcp_v6_send_reset(NULL, skb);
69024 }
69025
69026@@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
69027 uid,
69028 0, /* non standard timer */
69029 0, /* open_requests have no inode */
69030- 0, req);
69031+ 0,
69032+#ifdef CONFIG_GRKERNSEC_HIDESYM
69033+ NULL
69034+#else
69035+ req
69036+#endif
69037+ );
69038 }
69039
69040 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69041@@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
69042 sock_i_uid(sp),
69043 icsk->icsk_probes_out,
69044 sock_i_ino(sp),
69045- atomic_read(&sp->sk_refcnt), sp,
69046+ atomic_read(&sp->sk_refcnt),
69047+#ifdef CONFIG_GRKERNSEC_HIDESYM
69048+ NULL,
69049+#else
69050+ sp,
69051+#endif
69052 jiffies_to_clock_t(icsk->icsk_rto),
69053 jiffies_to_clock_t(icsk->icsk_ack.ato),
69054 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69055@@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
69056 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69057 tw->tw_substate, 0, 0,
69058 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69059- atomic_read(&tw->tw_refcnt), tw);
69060+ atomic_read(&tw->tw_refcnt),
69061+#ifdef CONFIG_GRKERNSEC_HIDESYM
69062+ NULL
69063+#else
69064+ tw
69065+#endif
69066+ );
69067 }
69068
69069 static int tcp6_seq_show(struct seq_file *seq, void *v)
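
The tcp_ipv6.c hunks above hook IPv6 TCP into GRKERNSEC_BLACKHOLE: ret records whether the packet failed to match a socket (1) or hit a TIME_WAIT entry (2), and the RST that would normally be generated in the no_tcp_socket path is suppressed while grsec_enable_blackhole is set, unless the packet arrived on a loopback device. A condensed sketch of that guard, assuming the grsec_enable_blackhole flag defined elsewhere in this patch; demo_maybe_send_reset() and the call into the file's static tcp_v6_send_reset() are illustrative only.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
extern int grsec_enable_blackhole;	/* sysctl-backed toggle from grsecurity */
#endif

/* answer with a reset only when blackholing is off, or when an unmatched
 * packet came in over loopback so local tools still see normal behaviour */
static void demo_maybe_send_reset(struct sk_buff *skb, int no_socket)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	if (grsec_enable_blackhole &&
	    !(no_socket && (skb->dev->flags & IFF_LOOPBACK)))
		return;
#endif
	tcp_v6_send_reset(NULL, skb);	/* static helper in tcp_ipv6.c */
}
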
69070diff -urNp linux-2.6.32.42/net/ipv6/udp.c linux-2.6.32.42/net/ipv6/udp.c
69071--- linux-2.6.32.42/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
69072+++ linux-2.6.32.42/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
69073@@ -49,6 +49,10 @@
69074 #include <linux/seq_file.h>
69075 #include "udp_impl.h"
69076
69077+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69078+extern int grsec_enable_blackhole;
69079+#endif
69080+
69081 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69082 {
69083 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69084@@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69085 if (rc == -ENOMEM) {
69086 UDP6_INC_STATS_BH(sock_net(sk),
69087 UDP_MIB_RCVBUFERRORS, is_udplite);
69088- atomic_inc(&sk->sk_drops);
69089+ atomic_inc_unchecked(&sk->sk_drops);
69090 }
69091 goto drop;
69092 }
69093@@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69094 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69095 proto == IPPROTO_UDPLITE);
69096
69097+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69098+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69099+#endif
69100 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
69101
69102 kfree_skb(skb);
69103@@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
69104 0, 0L, 0,
69105 sock_i_uid(sp), 0,
69106 sock_i_ino(sp),
69107- atomic_read(&sp->sk_refcnt), sp,
69108- atomic_read(&sp->sk_drops));
69109+ atomic_read(&sp->sk_refcnt),
69110+#ifdef CONFIG_GRKERNSEC_HIDESYM
69111+ NULL,
69112+#else
69113+ sp,
69114+#endif
69115+ atomic_read_unchecked(&sp->sk_drops));
69116 }
69117
69118 int udp6_seq_show(struct seq_file *seq, void *v)
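
Across the IPv6 raw and UDP paths above, updates to sk->sk_drops switch from atomic_inc()/atomic_read() to atomic_inc_unchecked()/atomic_read_unchecked(). Under PAX_REFCOUNT, ordinary atomic_t operations are instrumented to catch overflows as reference-count bugs; counters that are pure statistics, like drop counts, are converted to the unchecked type so that wrapping them is not treated as an attack. A small sketch of the split, assuming the atomic_unchecked_t type and helpers that this patch defines elsewhere:

#include <linux/types.h>
#include <asm/atomic.h>

/* a refcount: an overflow here would be a real bug, keep it checked */
static atomic_t demo_refcnt = ATOMIC_INIT(1);

/* a statistic: wrapping is harmless, so use the unchecked variant */
static atomic_unchecked_t demo_drops = ATOMIC_INIT(0);

static void demo_get(void)
{
	atomic_inc(&demo_refcnt);	/* overflow here would trip PAX_REFCOUNT */
}

static void demo_drop_packet(void)
{
	atomic_inc_unchecked(&demo_drops);
}

static int demo_drops_seen(void)
{
	return atomic_read_unchecked(&demo_drops);
}
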
69119diff -urNp linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c
69120--- linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
69121+++ linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
69122@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
69123 add_wait_queue(&self->open_wait, &wait);
69124
69125 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69126- __FILE__,__LINE__, tty->driver->name, self->open_count );
69127+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69128
69129 /* As far as I can see, we protect open_count - Jean II */
69130 spin_lock_irqsave(&self->spinlock, flags);
69131 if (!tty_hung_up_p(filp)) {
69132 extra_count = 1;
69133- self->open_count--;
69134+ local_dec(&self->open_count);
69135 }
69136 spin_unlock_irqrestore(&self->spinlock, flags);
69137- self->blocked_open++;
69138+ local_inc(&self->blocked_open);
69139
69140 while (1) {
69141 if (tty->termios->c_cflag & CBAUD) {
69142@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
69143 }
69144
69145 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69146- __FILE__,__LINE__, tty->driver->name, self->open_count );
69147+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69148
69149 schedule();
69150 }
69151@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
69152 if (extra_count) {
69153 /* ++ is not atomic, so this should be protected - Jean II */
69154 spin_lock_irqsave(&self->spinlock, flags);
69155- self->open_count++;
69156+ local_inc(&self->open_count);
69157 spin_unlock_irqrestore(&self->spinlock, flags);
69158 }
69159- self->blocked_open--;
69160+ local_dec(&self->blocked_open);
69161
69162 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69163- __FILE__,__LINE__, tty->driver->name, self->open_count);
69164+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69165
69166 if (!retval)
69167 self->flags |= ASYNC_NORMAL_ACTIVE;
69168@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
69169 }
69170 /* ++ is not atomic, so this should be protected - Jean II */
69171 spin_lock_irqsave(&self->spinlock, flags);
69172- self->open_count++;
69173+ local_inc(&self->open_count);
69174
69175 tty->driver_data = self;
69176 self->tty = tty;
69177 spin_unlock_irqrestore(&self->spinlock, flags);
69178
69179 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69180- self->line, self->open_count);
69181+ self->line, local_read(&self->open_count));
69182
69183 /* Not really used by us, but lets do it anyway */
69184 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69185@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
69186 return;
69187 }
69188
69189- if ((tty->count == 1) && (self->open_count != 1)) {
69190+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69191 /*
69192 * Uh, oh. tty->count is 1, which means that the tty
69193 * structure will be freed. state->count should always
69194@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
69195 */
69196 IRDA_DEBUG(0, "%s(), bad serial port count; "
69197 "tty->count is 1, state->count is %d\n", __func__ ,
69198- self->open_count);
69199- self->open_count = 1;
69200+ local_read(&self->open_count));
69201+ local_set(&self->open_count, 1);
69202 }
69203
69204- if (--self->open_count < 0) {
69205+ if (local_dec_return(&self->open_count) < 0) {
69206 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69207- __func__, self->line, self->open_count);
69208- self->open_count = 0;
69209+ __func__, self->line, local_read(&self->open_count));
69210+ local_set(&self->open_count, 0);
69211 }
69212- if (self->open_count) {
69213+ if (local_read(&self->open_count)) {
69214 spin_unlock_irqrestore(&self->spinlock, flags);
69215
69216 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69217@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
69218 tty->closing = 0;
69219 self->tty = NULL;
69220
69221- if (self->blocked_open) {
69222+ if (local_read(&self->blocked_open)) {
69223 if (self->close_delay)
69224 schedule_timeout_interruptible(self->close_delay);
69225 wake_up_interruptible(&self->open_wait);
69226@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
69227 spin_lock_irqsave(&self->spinlock, flags);
69228 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69229 self->tty = NULL;
69230- self->open_count = 0;
69231+ local_set(&self->open_count, 0);
69232 spin_unlock_irqrestore(&self->spinlock, flags);
69233
69234 wake_up_interruptible(&self->open_wait);
69235@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
69236 seq_putc(m, '\n');
69237
69238 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69239- seq_printf(m, "Open count: %d\n", self->open_count);
69240+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69241 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69242 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69243
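
The ircomm_tty changes above (and the mac80211 open_count changes further down) convert plain int open/blocked counters into local_t from <asm/local.h>, manipulated with local_inc(), local_dec(), local_dec_return(), local_set() and read with local_read() (see Documentation/local_ops.txt for the primitives). A compact sketch of that API with a hypothetical port structure:

#include <asm/local.h>

struct demo_port {
	local_t open_count;
	local_t blocked_open;
};

static void demo_port_open(struct demo_port *p)
{
	local_inc(&p->open_count);
}

static long demo_port_close(struct demo_port *p)
{
	/* clamp at zero if the bookkeeping ever goes negative,
	 * mirroring the defensive check in ircomm_tty_close() */
	if (local_dec_return(&p->open_count) < 0)
		local_set(&p->open_count, 0);
	return local_read(&p->open_count);
}
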
69244diff -urNp linux-2.6.32.42/net/iucv/af_iucv.c linux-2.6.32.42/net/iucv/af_iucv.c
69245--- linux-2.6.32.42/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
69246+++ linux-2.6.32.42/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
69247@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
69248
69249 write_lock_bh(&iucv_sk_list.lock);
69250
69251- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69252+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69253 while (__iucv_get_sock_by_name(name)) {
69254 sprintf(name, "%08x",
69255- atomic_inc_return(&iucv_sk_list.autobind_name));
69256+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69257 }
69258
69259 write_unlock_bh(&iucv_sk_list.lock);
69260diff -urNp linux-2.6.32.42/net/key/af_key.c linux-2.6.32.42/net/key/af_key.c
69261--- linux-2.6.32.42/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
69262+++ linux-2.6.32.42/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
69263@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
69264 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69265 struct xfrm_kmaddress k;
69266
69267+ pax_track_stack();
69268+
69269 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69270 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69271 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69272@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
69273 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
69274 else
69275 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
69276+#ifdef CONFIG_GRKERNSEC_HIDESYM
69277+ NULL,
69278+#else
69279 s,
69280+#endif
69281 atomic_read(&s->sk_refcnt),
69282 sk_rmem_alloc_get(s),
69283 sk_wmem_alloc_get(s),
69284diff -urNp linux-2.6.32.42/net/mac80211/cfg.c linux-2.6.32.42/net/mac80211/cfg.c
69285--- linux-2.6.32.42/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
69286+++ linux-2.6.32.42/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
69287@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
69288 return err;
69289 }
69290
69291-struct cfg80211_ops mac80211_config_ops = {
69292+const struct cfg80211_ops mac80211_config_ops = {
69293 .add_virtual_intf = ieee80211_add_iface,
69294 .del_virtual_intf = ieee80211_del_iface,
69295 .change_virtual_intf = ieee80211_change_iface,
69296diff -urNp linux-2.6.32.42/net/mac80211/cfg.h linux-2.6.32.42/net/mac80211/cfg.h
69297--- linux-2.6.32.42/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
69298+++ linux-2.6.32.42/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
69299@@ -4,6 +4,6 @@
69300 #ifndef __CFG_H
69301 #define __CFG_H
69302
69303-extern struct cfg80211_ops mac80211_config_ops;
69304+extern const struct cfg80211_ops mac80211_config_ops;
69305
69306 #endif /* __CFG_H */
69307diff -urNp linux-2.6.32.42/net/mac80211/debugfs_key.c linux-2.6.32.42/net/mac80211/debugfs_key.c
69308--- linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
69309+++ linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
69310@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
69311 size_t count, loff_t *ppos)
69312 {
69313 struct ieee80211_key *key = file->private_data;
69314- int i, res, bufsize = 2 * key->conf.keylen + 2;
69315+ int i, bufsize = 2 * key->conf.keylen + 2;
69316 char *buf = kmalloc(bufsize, GFP_KERNEL);
69317 char *p = buf;
69318+ ssize_t res;
69319+
69320+ if (buf == NULL)
69321+ return -ENOMEM;
69322
69323 for (i = 0; i < key->conf.keylen; i++)
69324 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
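
The debugfs_key.c hunk above makes key_key_read() fail with -ENOMEM when the kmalloc() of the hex-dump buffer fails instead of writing through a NULL pointer, and widens res to ssize_t to match the read handler's return type. The same allocate-check-format pattern as a tiny standalone sketch (demo names, not the mac80211 code):

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>

static ssize_t demo_format_hex(const u8 *key, size_t keylen, char **out)
{
	size_t bufsize = 2 * keylen + 2;	/* hex digits + '\n' + '\0' */
	char *buf = kmalloc(bufsize, GFP_KERNEL);
	char *p = buf;
	size_t i;

	if (buf == NULL)
		return -ENOMEM;

	for (i = 0; i < keylen; i++)
		p += scnprintf(p, bufsize + buf - p, "%02x", key[i]);
	p += scnprintf(p, bufsize + buf - p, "\n");

	*out = buf;
	return p - buf;
}
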
69325diff -urNp linux-2.6.32.42/net/mac80211/debugfs_sta.c linux-2.6.32.42/net/mac80211/debugfs_sta.c
69326--- linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
69327+++ linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
69328@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
69329 int i;
69330 struct sta_info *sta = file->private_data;
69331
69332+ pax_track_stack();
69333+
69334 spin_lock_bh(&sta->lock);
69335 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
69336 sta->ampdu_mlme.dialog_token_allocator + 1);
69337diff -urNp linux-2.6.32.42/net/mac80211/ieee80211_i.h linux-2.6.32.42/net/mac80211/ieee80211_i.h
69338--- linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
69339+++ linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
69340@@ -25,6 +25,7 @@
69341 #include <linux/etherdevice.h>
69342 #include <net/cfg80211.h>
69343 #include <net/mac80211.h>
69344+#include <asm/local.h>
69345 #include "key.h"
69346 #include "sta_info.h"
69347
69348@@ -635,7 +636,7 @@ struct ieee80211_local {
69349 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69350 spinlock_t queue_stop_reason_lock;
69351
69352- int open_count;
69353+ local_t open_count;
69354 int monitors, cooked_mntrs;
69355 /* number of interfaces with corresponding FIF_ flags */
69356 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
69357diff -urNp linux-2.6.32.42/net/mac80211/iface.c linux-2.6.32.42/net/mac80211/iface.c
69358--- linux-2.6.32.42/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
69359+++ linux-2.6.32.42/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
69360@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
69361 break;
69362 }
69363
69364- if (local->open_count == 0) {
69365+ if (local_read(&local->open_count) == 0) {
69366 res = drv_start(local);
69367 if (res)
69368 goto err_del_bss;
69369@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
69370 * Validate the MAC address for this device.
69371 */
69372 if (!is_valid_ether_addr(dev->dev_addr)) {
69373- if (!local->open_count)
69374+ if (!local_read(&local->open_count))
69375 drv_stop(local);
69376 return -EADDRNOTAVAIL;
69377 }
69378@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
69379
69380 hw_reconf_flags |= __ieee80211_recalc_idle(local);
69381
69382- local->open_count++;
69383+ local_inc(&local->open_count);
69384 if (hw_reconf_flags) {
69385 ieee80211_hw_config(local, hw_reconf_flags);
69386 /*
69387@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
69388 err_del_interface:
69389 drv_remove_interface(local, &conf);
69390 err_stop:
69391- if (!local->open_count)
69392+ if (!local_read(&local->open_count))
69393 drv_stop(local);
69394 err_del_bss:
69395 sdata->bss = NULL;
69396@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
69397 WARN_ON(!list_empty(&sdata->u.ap.vlans));
69398 }
69399
69400- local->open_count--;
69401+ local_dec(&local->open_count);
69402
69403 switch (sdata->vif.type) {
69404 case NL80211_IFTYPE_AP_VLAN:
69405@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
69406
69407 ieee80211_recalc_ps(local, -1);
69408
69409- if (local->open_count == 0) {
69410+ if (local_read(&local->open_count) == 0) {
69411 ieee80211_clear_tx_pending(local);
69412 ieee80211_stop_device(local);
69413
69414diff -urNp linux-2.6.32.42/net/mac80211/main.c linux-2.6.32.42/net/mac80211/main.c
69415--- linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
69416+++ linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
69417@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
69418 local->hw.conf.power_level = power;
69419 }
69420
69421- if (changed && local->open_count) {
69422+ if (changed && local_read(&local->open_count)) {
69423 ret = drv_config(local, changed);
69424 /*
69425 * Goal:
69426diff -urNp linux-2.6.32.42/net/mac80211/mlme.c linux-2.6.32.42/net/mac80211/mlme.c
69427--- linux-2.6.32.42/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
69428+++ linux-2.6.32.42/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
69429@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
69430 bool have_higher_than_11mbit = false, newsta = false;
69431 u16 ap_ht_cap_flags;
69432
69433+ pax_track_stack();
69434+
69435 /*
69436 * AssocResp and ReassocResp have identical structure, so process both
69437 * of them in this function.
69438diff -urNp linux-2.6.32.42/net/mac80211/pm.c linux-2.6.32.42/net/mac80211/pm.c
69439--- linux-2.6.32.42/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
69440+++ linux-2.6.32.42/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
69441@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
69442 }
69443
69444 /* stop hardware - this must stop RX */
69445- if (local->open_count)
69446+ if (local_read(&local->open_count))
69447 ieee80211_stop_device(local);
69448
69449 local->suspended = true;
69450diff -urNp linux-2.6.32.42/net/mac80211/rate.c linux-2.6.32.42/net/mac80211/rate.c
69451--- linux-2.6.32.42/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
69452+++ linux-2.6.32.42/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
69453@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69454 struct rate_control_ref *ref, *old;
69455
69456 ASSERT_RTNL();
69457- if (local->open_count)
69458+ if (local_read(&local->open_count))
69459 return -EBUSY;
69460
69461 ref = rate_control_alloc(name, local);
69462diff -urNp linux-2.6.32.42/net/mac80211/tx.c linux-2.6.32.42/net/mac80211/tx.c
69463--- linux-2.6.32.42/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
69464+++ linux-2.6.32.42/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
69465@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
69466 return cpu_to_le16(dur);
69467 }
69468
69469-static int inline is_ieee80211_device(struct ieee80211_local *local,
69470+static inline int is_ieee80211_device(struct ieee80211_local *local,
69471 struct net_device *dev)
69472 {
69473 return local == wdev_priv(dev->ieee80211_ptr);
69474diff -urNp linux-2.6.32.42/net/mac80211/util.c linux-2.6.32.42/net/mac80211/util.c
69475--- linux-2.6.32.42/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
69476+++ linux-2.6.32.42/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
69477@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
69478 local->resuming = true;
69479
69480 /* restart hardware */
69481- if (local->open_count) {
69482+ if (local_read(&local->open_count)) {
69483 /*
69484 * Upon resume hardware can sometimes be goofy due to
69485 * various platform / driver / bus issues, so restarting
69486diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c
69487--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
69488+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
69489@@ -564,7 +564,7 @@ static const struct file_operations ip_v
69490 .open = ip_vs_app_open,
69491 .read = seq_read,
69492 .llseek = seq_lseek,
69493- .release = seq_release,
69494+ .release = seq_release_net,
69495 };
69496 #endif
69497
69498diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c
69499--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
69500+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
69501@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69502 /* if the connection is not template and is created
69503 * by sync, preserve the activity flag.
69504 */
69505- cp->flags |= atomic_read(&dest->conn_flags) &
69506+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
69507 (~IP_VS_CONN_F_INACTIVE);
69508 else
69509- cp->flags |= atomic_read(&dest->conn_flags);
69510+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
69511 cp->dest = dest;
69512
69513 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
69514@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
69515 atomic_set(&cp->refcnt, 1);
69516
69517 atomic_set(&cp->n_control, 0);
69518- atomic_set(&cp->in_pkts, 0);
69519+ atomic_set_unchecked(&cp->in_pkts, 0);
69520
69521 atomic_inc(&ip_vs_conn_count);
69522 if (flags & IP_VS_CONN_F_NO_CPORT)
69523@@ -871,7 +871,7 @@ static const struct file_operations ip_v
69524 .open = ip_vs_conn_open,
69525 .read = seq_read,
69526 .llseek = seq_lseek,
69527- .release = seq_release,
69528+ .release = seq_release_net,
69529 };
69530
69531 static const char *ip_vs_origin_name(unsigned flags)
69532@@ -934,7 +934,7 @@ static const struct file_operations ip_v
69533 .open = ip_vs_conn_sync_open,
69534 .read = seq_read,
69535 .llseek = seq_lseek,
69536- .release = seq_release,
69537+ .release = seq_release_net,
69538 };
69539
69540 #endif
69541@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
69542
69543 /* Don't drop the entry if its number of incoming packets is not
69544 located in [0, 8] */
69545- i = atomic_read(&cp->in_pkts);
69546+ i = atomic_read_unchecked(&cp->in_pkts);
69547 if (i > 8 || i < 0) return 0;
69548
69549 if (!todrop_rate[i]) return 0;
69550diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c
69551--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
69552+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
69553@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69554 ret = cp->packet_xmit(skb, cp, pp);
69555 /* do not touch skb anymore */
69556
69557- atomic_inc(&cp->in_pkts);
69558+ atomic_inc_unchecked(&cp->in_pkts);
69559 ip_vs_conn_put(cp);
69560 return ret;
69561 }
69562@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69563 * Sync connection if it is about to close to
69564 * encorage the standby servers to update the connections timeout
69565 */
69566- pkts = atomic_add_return(1, &cp->in_pkts);
69567+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69568 if (af == AF_INET &&
69569 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
69570 (((cp->protocol != IPPROTO_TCP ||
69571diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c
69572--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
69573+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
69574@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
69575 ip_vs_rs_hash(dest);
69576 write_unlock_bh(&__ip_vs_rs_lock);
69577 }
69578- atomic_set(&dest->conn_flags, conn_flags);
69579+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
69580
69581 /* bind the service */
69582 if (!dest->svc) {
69583@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
69584 " %-7s %-6d %-10d %-10d\n",
69585 &dest->addr.in6,
69586 ntohs(dest->port),
69587- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69588+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69589 atomic_read(&dest->weight),
69590 atomic_read(&dest->activeconns),
69591 atomic_read(&dest->inactconns));
69592@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
69593 "%-7s %-6d %-10d %-10d\n",
69594 ntohl(dest->addr.ip),
69595 ntohs(dest->port),
69596- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69597+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69598 atomic_read(&dest->weight),
69599 atomic_read(&dest->activeconns),
69600 atomic_read(&dest->inactconns));
69601@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
69602 .open = ip_vs_info_open,
69603 .read = seq_read,
69604 .llseek = seq_lseek,
69605- .release = seq_release_private,
69606+ .release = seq_release_net,
69607 };
69608
69609 #endif
69610@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
69611 .open = ip_vs_stats_seq_open,
69612 .read = seq_read,
69613 .llseek = seq_lseek,
69614- .release = single_release,
69615+ .release = single_release_net,
69616 };
69617
69618 #endif
69619@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
69620
69621 entry.addr = dest->addr.ip;
69622 entry.port = dest->port;
69623- entry.conn_flags = atomic_read(&dest->conn_flags);
69624+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69625 entry.weight = atomic_read(&dest->weight);
69626 entry.u_threshold = dest->u_threshold;
69627 entry.l_threshold = dest->l_threshold;
69628@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
69629 unsigned char arg[128];
69630 int ret = 0;
69631
69632+ pax_track_stack();
69633+
69634 if (!capable(CAP_NET_ADMIN))
69635 return -EPERM;
69636
69637@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
69638 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69639
69640 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69641- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69642+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69643 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69644 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69645 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
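
Several IPVS /proc interfaces above have their .release handlers switched from seq_release()/seq_release_private()/single_release() to the net-namespace-aware seq_release_net()/single_release_net(), which drop the struct net reference recorded by the corresponding *_open_net() helper in addition to freeing the seq_file state. As background, a generic sketch of the paired helpers; the file, open and show functions are hypothetical, and whether the matching ip_vs open paths use the *_open_net() variants should be checked in the tree.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/seq_file_net.h>

static int demo_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "per-netns statistics would go here\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* takes a reference on the inode's network namespace */
	return single_open_net(inode, file, demo_show);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	/* drops that namespace reference as well as the seq_file state */
	.release = single_release_net,
};
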
69646diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c
69647--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
69648+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
69649@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
69650
69651 if (opt)
69652 memcpy(&cp->in_seq, opt, sizeof(*opt));
69653- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69654+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69655 cp->state = state;
69656 cp->old_state = cp->state;
69657 /*
69658diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c
69659--- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
69660+++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
69661@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69662 else
69663 rc = NF_ACCEPT;
69664 /* do not touch skb anymore */
69665- atomic_inc(&cp->in_pkts);
69666+ atomic_inc_unchecked(&cp->in_pkts);
69667 goto out;
69668 }
69669
69670@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69671 else
69672 rc = NF_ACCEPT;
69673 /* do not touch skb anymore */
69674- atomic_inc(&cp->in_pkts);
69675+ atomic_inc_unchecked(&cp->in_pkts);
69676 goto out;
69677 }
69678
69679diff -urNp linux-2.6.32.42/net/netfilter/Kconfig linux-2.6.32.42/net/netfilter/Kconfig
69680--- linux-2.6.32.42/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
69681+++ linux-2.6.32.42/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
69682@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
69683
69684 To compile it as a module, choose M here. If unsure, say N.
69685
69686+config NETFILTER_XT_MATCH_GRADM
69687+ tristate '"gradm" match support'
69688+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69689+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69690+ ---help---
69691+ The gradm match allows matching on whether the grsecurity RBAC
69692+ system is enabled. It is useful when iptables rules are applied
69693+ early on bootup to prevent connections to the machine (except
69694+ from a trusted host) while the RBAC system is still disabled.
69695+
69696 config NETFILTER_XT_MATCH_HASHLIMIT
69697 tristate '"hashlimit" match support'
69698 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
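
The NETFILTER_XT_MATCH_GRADM entry above (together with the Makefile and xt_gradm.c additions that follow) introduces an iptables match on the grsecurity RBAC state, so a single early "-m gradm" rule can drop untrusted traffic until the RBAC policy has been enabled. Since the body of xt_gradm.c is cut off at the end of this file, here is only a rough, generic skeleton of what a 2.6.32-era Xtables match module looks like; the info structure, the match logic and all demo_* names are placeholders, not the real gradm code.

#include <linux/types.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

struct demo_mtinfo {
	__u16 flags;
	__u16 invflags;
};

static bool demo_mt(const struct sk_buff *skb,
		    const struct xt_match_param *par)
{
	const struct demo_mtinfo *info = par->matchinfo;
	bool rbac_enabled = false;	/* placeholder for the real RBAC query */

	/* match when the state agrees with the (possibly inverted) rule */
	return rbac_enabled ^ !!info->invflags;
}

static struct xt_match demo_mt_reg __read_mostly = {
	.name      = "gradm",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,
	.match     = demo_mt,
	.matchsize = sizeof(struct demo_mtinfo),
	.me        = THIS_MODULE,
};

static int __init demo_mt_init(void)
{
	return xt_register_match(&demo_mt_reg);
}

static void __exit demo_mt_exit(void)
{
	xt_unregister_match(&demo_mt_reg);
}

module_init(demo_mt_init);
module_exit(demo_mt_exit);
MODULE_LICENSE("GPL");
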
69699diff -urNp linux-2.6.32.42/net/netfilter/Makefile linux-2.6.32.42/net/netfilter/Makefile
69700--- linux-2.6.32.42/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
69701+++ linux-2.6.32.42/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
69702@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
69703 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
69704 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69705 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69706+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69707 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69708 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69709 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69710diff -urNp linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c
69711--- linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
69712+++ linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
69713@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
69714 static int
69715 ctnetlink_parse_tuple(const struct nlattr * const cda[],
69716 struct nf_conntrack_tuple *tuple,
69717- enum ctattr_tuple type, u_int8_t l3num)
69718+ enum ctattr_type type, u_int8_t l3num)
69719 {
69720 struct nlattr *tb[CTA_TUPLE_MAX+1];
69721 int err;
69722diff -urNp linux-2.6.32.42/net/netfilter/nfnetlink_log.c linux-2.6.32.42/net/netfilter/nfnetlink_log.c
69723--- linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
69724+++ linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
69725@@ -68,7 +68,7 @@ struct nfulnl_instance {
69726 };
69727
69728 static DEFINE_RWLOCK(instances_lock);
69729-static atomic_t global_seq;
69730+static atomic_unchecked_t global_seq;
69731
69732 #define INSTANCE_BUCKETS 16
69733 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69734@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
69735 /* global sequence number */
69736 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69737 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69738- htonl(atomic_inc_return(&global_seq)));
69739+ htonl(atomic_inc_return_unchecked(&global_seq)));
69740
69741 if (data_len) {
69742 struct nlattr *nla;
69743diff -urNp linux-2.6.32.42/net/netfilter/xt_gradm.c linux-2.6.32.42/net/netfilter/xt_gradm.c
69744--- linux-2.6.32.42/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69745+++ linux-2.6.32.42/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
69746@@ -0,0 +1,51 @@
69747+/*
69748+ * gradm match for netfilter
69749